From 43278ba0520295c51e2405907d0f39a4820cc49c Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Fri, 31 Jan 2025 11:14:29 +0000 Subject: [PATCH 01/13] Update generator to facilitate big endian --- crates/stdarch-gen-arm/src/big_endian.rs | 221 +++++++++++++ crates/stdarch-gen-arm/src/context.rs | 4 + crates/stdarch-gen-arm/src/expression.rs | 51 ++- crates/stdarch-gen-arm/src/intrinsic.rs | 393 +++++++++++++++++------ crates/stdarch-gen-arm/src/main.rs | 1 + 5 files changed, 565 insertions(+), 105 deletions(-) create mode 100644 crates/stdarch-gen-arm/src/big_endian.rs diff --git a/crates/stdarch-gen-arm/src/big_endian.rs b/crates/stdarch-gen-arm/src/big_endian.rs new file mode 100644 index 0000000000..5bf1a720ea --- /dev/null +++ b/crates/stdarch-gen-arm/src/big_endian.rs @@ -0,0 +1,221 @@ +use crate::expression::LetVariant; +use crate::wildstring::WildStringPart; +use crate::{ + expression::{Expression, IdentifierType}, + typekinds::*, + wildstring::WildString, +}; + +/// Simplifies creating a string that can be used in an Expression, as Expression +/// expects all strings to be `WildString` +fn create_single_wild_string(name: &str) -> WildString { + WildString(vec![WildStringPart::String(name.to_string())]) +} + +/// Creates an Identifier with name `name` with no wildcards. This, for example, +/// can be used to create variables, function names or arbitrary input. Is is +/// extremely flexible. +pub fn create_symbol_identifier(arbitrary_string: &str) -> Expression { + let identifier_name = create_single_wild_string(arbitrary_string); + Expression::Identifier(identifier_name, IdentifierType::Symbol) +} + +/// To compose the simd_shuffle! call we need: +/// - simd_shuffle!(, , ) +/// +/// Here we are creating a string version of the `` that can be used as an +/// Expression Identifier +/// +/// In textual form `a: int32x4_t` which has 4 lanes would generate: +/// ``` +/// [0, 1, 2, 3] +/// ``` +fn create_array(lanes: u32, reverse: bool) -> Option { + if reverse { + match lanes { + 1 => None, /* Makes no sense to shuffle an array of size 1 */ + 2 => Some("[1, 0]".to_string()), + 3 => Some("[2, 1, 0]".to_string()), + 4 => Some("[3, 2, 1, 0]".to_string()), + 8 => Some("[7, 6, 5, 4, 3, 2, 1, 0]".to_string()), + 16 => Some("[15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]".to_string()), + _ => panic!("Incorrect vector number of vector lanes: {}", lanes), + } + } else { + match lanes { + 1 => None, /* Makes no sense to shuffle an array of size 1 */ + 2 => Some("[0, 1]".to_string()), + 3 => Some("[0, 1, 2]".to_string()), + 4 => Some("[0, 1, 2, 3]".to_string()), + 8 => Some("[0, 1, 2, 3, 4, 5, 6, 7]".to_string()), + 16 => Some("[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]".to_string()), + _ => panic!("Incorrect vector number of vector lanes: {}", lanes), + } + } +} + +/// Creates: `let : = ` +pub fn create_let_variable( + variable_name: &str, + type_kind: &TypeKind, + expression: Expression, +) -> Expression { + let identifier_name = create_single_wild_string(variable_name); + Expression::Let(LetVariant::WithType( + identifier_name, + type_kind.clone(), + Box::new(expression), + )) +} + +pub fn create_mut_let_variable( + variable_name: &str, + type_kind: &TypeKind, + expression: Expression, +) -> Expression { + let identifier_name = create_single_wild_string(variable_name); + Expression::Let(LetVariant::MutWithType( + identifier_name, + type_kind.clone(), + Box::new(expression), + )) +} + +pub fn type_has_tuple(type_kind: &TypeKind) -> bool { + if let TypeKind::Vector(vector_type) = type_kind { + vector_type.tuple_size().is_some() + } else { + false + } +} + +pub fn make_variable_mutable(variable_name: &str, type_kind: &TypeKind) -> Expression { + let mut_variable = format!( + "let mut {}: {} = {}", + variable_name, + type_kind.to_string(), + variable_name + ); + let identifier_name = create_single_wild_string(&mut_variable); + Expression::Identifier(identifier_name, IdentifierType::Symbol) +} + +/// For creating shuffle calls, accepts function pointers for formatting for tuple +/// types and types without a tuple +/// +/// Example: +/// +/// `a: int32x4_t` with formatting function `create_shuffle_call_fmt` creates: +/// ``` +/// simd_shuffle!(a, a, [0, 1, 2, 3]) +/// ``` +/// +/// `a: int32x4x2_t` creates: +/// ``` +/// a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3]) +/// a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3]) +/// ``` +fn create_shuffle_internal( + variable_name: &String, + type_kind: &TypeKind, + reverse: bool, + fmt_tuple: fn(variable_name: &String, idx: u32, array_lanes: &String) -> String, + fmt: fn(variable_name: &String, type_kind: &TypeKind, array_lanes: &String) -> String, +) -> Option { + let TypeKind::Vector(vector_type) = type_kind else { + return None; + }; + + let lane_count = vector_type.lanes(); + let Some(array_lanes) = create_array(lane_count, reverse) else { + return None; + }; + + let tuple_count = vector_type.tuple_size().map_or_else(|| 0, |t| t.to_int()); + + if tuple_count > 0 { + let capacity_estimate: usize = + tuple_count as usize * (lane_count as usize + ((variable_name.len() + 2) * 3)); + let mut string_builder = String::with_capacity(capacity_estimate); + + /* .idx = simd_shuffle!(.idx, .idx, []) */ + for idx in 0..tuple_count { + let formatted = fmt_tuple(variable_name, idx, &array_lanes); + string_builder += formatted.as_str(); + } + Some(create_symbol_identifier(&string_builder)) + } else { + /* Generate a list of shuffles for each tuple */ + let expression = fmt(variable_name, type_kind, &array_lanes); + Some(create_symbol_identifier(&expression)) + } +} + +fn create_assigned_tuple_shuffle_call_fmt( + variable_name: &String, + idx: u32, + array_lanes: &String, +) -> String { + format!( + "{variable_name}.{idx} = simd_shuffle!({variable_name}.{idx}, {variable_name}.{idx}, {array_lanes});\n", + variable_name = variable_name, + idx = idx, + array_lanes = array_lanes + ) +} + +fn create_assigned_shuffle_call_fmt( + variable_name: &String, + type_kind: &TypeKind, + array_lanes: &String, +) -> String { + format!( + "let {variable_name}: {type_kind} = simd_shuffle!({variable_name}, {variable_name}, {array_lanes})", + type_kind = type_kind.to_string(), + variable_name = variable_name, + array_lanes = array_lanes + ) +} + +fn create_shuffle_call_fmt( + variable_name: &String, + _type_kind: &TypeKind, + array_lanes: &String, +) -> String { + format!( + "simd_shuffle!({variable_name}, {variable_name}, {array_lanes})", + variable_name = variable_name, + array_lanes = array_lanes + ) +} + +/// Create a `simd_shuffle!(<...>, [...])` call, where the output is stored +/// in a variable named `variable_name` +pub fn create_assigned_shuffle_call( + variable_name: &String, + type_kind: &TypeKind, + reverse: bool, +) -> Option { + create_shuffle_internal( + variable_name, + type_kind, + reverse, + create_assigned_tuple_shuffle_call_fmt, + create_assigned_shuffle_call_fmt, + ) +} + +/// Create a `simd_shuffle!(<...>, [...])` call +pub fn create_shuffle_call( + variable_name: &String, + type_kind: &TypeKind, + reverse: bool, +) -> Option { + create_shuffle_internal( + variable_name, + type_kind, + reverse, + create_assigned_tuple_shuffle_call_fmt, + create_shuffle_call_fmt, + ) +} diff --git a/crates/stdarch-gen-arm/src/context.rs b/crates/stdarch-gen-arm/src/context.rs index aa29eda820..44b5208f39 100644 --- a/crates/stdarch-gen-arm/src/context.rs +++ b/crates/stdarch-gen-arm/src/context.rs @@ -35,6 +35,10 @@ pub struct GlobalContext { pub arch_cfgs: Vec, #[serde(default)] pub uses_neon_types: bool, + + /// Should the yaml file automagically generate big endian shuffling + #[serde(default)] + pub auto_big_endian: Option, } /// Context of an intrinsic group diff --git a/crates/stdarch-gen-arm/src/expression.rs b/crates/stdarch-gen-arm/src/expression.rs index 4a572db3e8..b796bf675c 100644 --- a/crates/stdarch-gen-arm/src/expression.rs +++ b/crates/stdarch-gen-arm/src/expression.rs @@ -9,6 +9,7 @@ use std::fmt; use std::str::FromStr; use crate::intrinsic::Intrinsic; +use crate::wildstring::WildStringPart; use crate::{ context::{self, Context, VariableType}, intrinsic::{Argument, LLVMLink, StaticDefinition}, @@ -29,6 +30,7 @@ pub enum IdentifierType { pub enum LetVariant { Basic(WildString, Box), WithType(WildString, TypeKind, Box), + MutWithType(WildString, TypeKind, Box), } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -155,9 +157,11 @@ impl Expression { cl_ptr_ex.pre_build(ctx)?; arg_exs.iter_mut().try_for_each(|ex| ex.pre_build(ctx)) } - Self::Let(LetVariant::Basic(_, ex) | LetVariant::WithType(_, _, ex)) => { - ex.pre_build(ctx) - } + Self::Let( + LetVariant::Basic(_, ex) + | LetVariant::WithType(_, _, ex) + | LetVariant::MutWithType(_, _, ex), + ) => ex.pre_build(ctx), Self::CastAs(ex, _) => ex.pre_build(ctx), Self::Multiply(lhs, rhs) | Self::Xor(lhs, rhs) => { lhs.pre_build(ctx)?; @@ -214,7 +218,8 @@ impl Expression { Self::Let(variant) => { let (var_name, ex, ty) = match variant { LetVariant::Basic(var_name, ex) => (var_name, ex, None), - LetVariant::WithType(var_name, ty, ex) => { + LetVariant::WithType(var_name, ty, ex) + | LetVariant::MutWithType(var_name, ty, ex) => { if let Some(w) = ty.wildcard() { ty.populate_wildcard(ctx.local.provide_type_wildcard(w)?)?; } @@ -285,9 +290,11 @@ impl Expression { // Nested structures that aren't inherently unsafe, but could contain other expressions // that might be. Self::Assign(_var, exp) => exp.requires_unsafe_wrapper(ctx_fn), - Self::Let(LetVariant::Basic(_, exp) | LetVariant::WithType(_, _, exp)) => { - exp.requires_unsafe_wrapper(ctx_fn) - } + Self::Let( + LetVariant::Basic(_, exp) + | LetVariant::WithType(_, _, exp) + | LetVariant::MutWithType(_, _, exp), + ) => exp.requires_unsafe_wrapper(ctx_fn), Self::Array(exps) => exps.iter().any(|exp| exp.requires_unsafe_wrapper(ctx_fn)), Self::Multiply(lhs, rhs) | Self::Xor(lhs, rhs) => { lhs.requires_unsafe_wrapper(ctx_fn) || rhs.requires_unsafe_wrapper(ctx_fn) @@ -330,6 +337,32 @@ impl Expression { } } } + + /// Determine if an expression is a `static_assert<...>` function call. + pub fn is_static_assert(&self) -> bool { + match self { + Expression::FnCall(fn_call) => match fn_call.0.as_ref() { + Expression::Identifier(wild_string, _) => { + if let WildStringPart::String(function_name) = &wild_string.0[0] { + function_name.starts_with("static_assert") + } else { + false + } + } + _ => panic!("Badly defined function call: {:?}", fn_call), + }, + _ => false, + } + } + + /// Determine if an espression is a LLVM binding + pub fn is_llvm_link(&self) -> bool { + if let Expression::LLVMLink(_) = self { + true + } else { + false + } + } } impl FromStr for Expression { @@ -422,6 +455,10 @@ impl ToTokens for Expression { let var_ident = format_ident!("{}", var_name.to_string()); tokens.append_all(quote! { let #var_ident: #ty = #exp }) } + Self::Let(LetVariant::MutWithType(var_name, ty, exp)) => { + let var_ident = format_ident!("{}", var_name.to_string()); + tokens.append_all(quote! { let mut #var_ident: #ty = #exp }) + } Self::Assign(var_name, exp) => { /* If we are dereferencing a variable to assign a value \ * the 'format_ident!' macro does not like the asterix */ diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index cabe58f9d6..0101423f1a 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -10,6 +10,10 @@ use std::ops::RangeInclusive; use std::str::FromStr; use crate::assert_instr::InstructionAssertionsForBaseType; +use crate::big_endian::{ + create_assigned_shuffle_call, create_let_variable, create_mut_let_variable, + create_shuffle_call, create_symbol_identifier, make_variable_mutable, type_has_tuple, +}; use crate::context::{GlobalContext, GroupContext}; use crate::input::{InputSet, InputSetEntry}; use crate::predicate_forms::{DontCareMethod, PredicateForm, PredicationMask, ZeroingMethod}; @@ -284,6 +288,7 @@ pub struct Signature { pub name: WildString, /// List of function arguments, leave unset or empty for no arguments pub arguments: Vec, + /// Function return type, leave unset for void pub return_type: Option, @@ -493,12 +498,14 @@ impl LLVMLink { let mut sig_name = ctx.local.signature.name.clone(); sig_name.prepend_str("_"); + let argv = self + .arguments + .clone() + .unwrap_or_else(|| ctx.local.signature.arguments.clone()); + let mut sig = Signature { name: sig_name, - arguments: self - .arguments - .clone() - .unwrap_or_else(|| ctx.local.signature.arguments.clone()), + arguments: argv, return_type: self .return_type .clone() @@ -905,6 +912,13 @@ pub struct Intrinsic { pub base_type: Option, /// Attributes for the function pub attr: Option>, + /// Big endian variant for composing, this gets populated internally + #[serde(skip)] + pub big_endian_compose: Vec, + /// Big endian sometimes needs the bits inverted from the default reverse + /// to work correctly + #[serde(default)] + pub big_endian_inverse: Option, } impl Intrinsic { @@ -1014,6 +1028,12 @@ impl Intrinsic { variant.post_build(&mut ctx)?; + /* If we should generate big endian we shall do so. It's possible + * we may not want to in some instances */ + if ctx.global.auto_big_endian.unwrap_or(false) { + self.generate_big_endian(&mut variant); + } + if let Some(n_variant_op) = ctx.local.n_variant_op().cloned() { variant.generate_n_variant(n_variant_op, &mut ctx) } else { @@ -1021,6 +1041,146 @@ impl Intrinsic { } } + /// Add a big endian implementation + fn generate_big_endian(&self, variant: &mut Intrinsic) { + /* We can't always blindly reverse the bits we sometimes need a + * different order - thus this allows us to have the ability to do so + * without having to play codegolf witht the yaml AST */ + let should_reverse = { + if let Some(should_reverse) = variant.big_endian_inverse { + should_reverse + } else if variant.compose.len() == 1 { + match &variant.compose[0] { + Expression::FnCall(fn_call) => fn_call.0.to_string() == "transmute", + _ => false, + } + } else { + false + } + }; + + let mut big_endian_expressions: Vec = Vec::new(); + + /* We cannot assign `a.0 = ` directly to a function parameter so + * need to make them mutable */ + for function_parameter in &variant.signature.arguments { + if type_has_tuple(&function_parameter.kind) { + /* We do not want to be creating a `mut` variant if the type + * has one lane. If it has one lane that means it does not need + * shuffling */ + if let TypeKind::Vector(vector_type) = &function_parameter.kind { + if vector_type.lanes() == 1 { + continue; + } + } + + let mutable_variable = make_variable_mutable( + &function_parameter.name.to_string(), + &function_parameter.kind, + ); + big_endian_expressions.push(mutable_variable); + } + } + + /* Possibly shuffle the vectors */ + for function_parameter in &variant.signature.arguments { + if let Some(shuffle_call) = create_assigned_shuffle_call( + &function_parameter.name.to_string(), + &function_parameter.kind, + should_reverse, + ) { + big_endian_expressions.push(shuffle_call); + } + } + + if !big_endian_expressions.is_empty() { + Vec::reserve( + &mut variant.big_endian_compose, + big_endian_expressions.len() + variant.compose.len(), + ); + let mut expression = &variant.compose[0]; + let needs_reordering = expression.is_static_assert() || expression.is_llvm_link(); + + /* We want to keep the asserts and llvm links at the start of + * the new big_endian_compose vector that we are creating */ + if needs_reordering { + let mut expression_idx = 0; + while expression.is_static_assert() || expression.is_llvm_link() { + /* Add static asserts and llvm links to the start of the + * vector */ + variant.big_endian_compose.push(expression.clone()); + expression_idx += 1; + expression = &variant.compose[expression_idx]; + } + + /* Add the big endian specific expressions */ + variant.big_endian_compose.extend(big_endian_expressions); + + /* Add the rest of the expressions */ + for i in expression_idx..variant.compose.len() { + variant.big_endian_compose.push(variant.compose[i].clone()); + } + } else { + /* If we do not need to reorder anything then immediately add + * the expressions from the big_endian_expressions and + * concatinate the compose vector */ + variant.big_endian_compose.extend(big_endian_expressions); + variant + .big_endian_compose + .extend(variant.compose.iter().cloned()); + } + } + + /* If we have a return type, there is a possibility we want to generate + * a shuffle call */ + if let Some(return_type) = &variant.signature.return_type { + let return_value = variant + .compose + .last() + .expect("Cannot define a return type with an empty function body"); + + /* If we do not create a shuffle call we do not need modify the + * return value and append to the big endian ast array. A bit confusing + * as in code we are making the final call before caputuring the return + * value of the intrinsic that has been called.*/ + let ret_val_name = "ret_val".to_string(); + if let Some(simd_shuffle_call) = + create_shuffle_call(&ret_val_name, return_type, should_reverse) + { + /* There is a possibility that the funcion arguments did not + * require big endian treatment, thus we need to now add the + * original function body before appending the return value.*/ + if variant.big_endian_compose.is_empty() { + variant + .big_endian_compose + .extend(variant.compose.iter().cloned()); + } + + /* Now we shuffle the return value - we are creating a new + * return value for the intrinsic. */ + let return_value_variable = if type_has_tuple(&return_type) { + create_mut_let_variable(&ret_val_name, return_type, return_value.clone()) + } else { + create_let_variable(&ret_val_name, return_type, return_value.clone()) + }; + + /* Remove the last item which will be the return value */ + variant.big_endian_compose.pop(); + variant.big_endian_compose.push(return_value_variable); + variant.big_endian_compose.push(simd_shuffle_call); + if type_has_tuple(return_type) { + /* We generated `tuple_count` number of calls to shuffle + * re-assigning each tuple however those generated calls do + * not make the parent function return. So we add the return + * value here */ + variant + .big_endian_compose + .push(create_symbol_identifier(&ret_val_name)); + } + } + } + } + /// Implement a "zeroing" (_z) method by calling an existing "merging" (_m) method, as required. fn generate_zeroing_pass_through( &mut self, @@ -1505,120 +1665,157 @@ impl Intrinsic { } } -impl ToTokens for Intrinsic { - fn to_tokens(&self, tokens: &mut TokenStream) { - let signature = &self.signature; - let fn_name = signature.fn_name().to_string(); - let target_feature = self.target_features.join(","); - let safety = self - .safety - .as_ref() - .expect("safety should be determined during `pre_build`"); - - if let Some(doc) = &self.doc { - let mut doc = vec![doc.to_string()]; - - doc.push(format!("[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/{})", &signature.doc_name())); +/// Some intrinsics require a little endian and big endian implementation, others +/// do not +enum Endianness { + Little, + Big, + NA, +} - if safety.has_doc_comments() { - doc.push("## Safety".to_string()); - for comment in safety.doc_comments() { - doc.push(format!(" * {comment}")); - } - } else { - assert!( - safety.is_safe(), - "{fn_name} is both public and unsafe, and so needs safety documentation" - ); +/// Based on the endianess will create the appropriate intrinsic, or simply +/// create the desired intrinsic without any endianess +fn create_tokens(intrinsic: &Intrinsic, endianness: Endianness, tokens: &mut TokenStream) { + let signature = &intrinsic.signature; + let fn_name = signature.fn_name().to_string(); + let target_feature = intrinsic.target_features.join(","); + let safety = intrinsic + .safety + .as_ref() + .expect("safety should be determined during `pre_build`"); + + if let Some(doc) = &intrinsic.doc { + let mut doc = vec![doc.to_string()]; + + doc.push(format!("[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/{})", &signature.doc_name())); + + if safety.has_doc_comments() { + doc.push("## Safety".to_string()); + for comment in safety.doc_comments() { + doc.push(format!(" * {comment}")); } - - tokens.append_all(quote! { #(#[doc = #doc])* }); } else { assert!( - matches!(self.visibility, FunctionVisibility::Private), - "{fn_name} needs to be private, or to have documentation." - ); - assert!( - !safety.has_doc_comments(), - "{fn_name} needs a documentation section for its safety comments." + safety.is_safe(), + "{fn_name} is both public and unsafe, and so needs safety documentation" ); } - tokens.append_all(quote! { #[inline] }); - - /* If we have manually defined attributes on the block of yaml with - * 'attr:' we want to add them */ - if let Some(attr) = &self.attr { - /* Scan to see if we have defined `FnCall: [target_feature, ['']]`*/ - if !has_target_feature_attr(attr) { - /* If not add the default one that is defined at the top of - * the yaml file. This does mean we scan the attributes vector - * twice, once to see if the `target_feature` exists and again - * to actually append the tokens. We could impose that the - * `target_feature` call has to be the first argument of the - * `attr` block */ - tokens.append_all(quote! { - #[target_feature(enable = #target_feature)] - }); - } + tokens.append_all(quote! { #(#[doc = #doc])* }); + } else { + assert!( + matches!(intrinsic.visibility, FunctionVisibility::Private), + "{fn_name} needs to be private, or to have documentation." + ); + assert!( + !safety.has_doc_comments(), + "{fn_name} needs a documentation section for its safety comments." + ); + } - /* Target feature will get added here */ - let attr_expressions = &mut attr.iter().peekable(); - while let Some(ex) = attr_expressions.next() { - let mut inner = TokenStream::new(); - ex.to_tokens(&mut inner); - tokens.append(Punct::new('#', Spacing::Alone)); - tokens.append(Group::new(Delimiter::Bracket, inner)); - } - } else { + tokens.append_all(quote! { #[inline] }); + + match endianness { + Endianness::Little => tokens.append_all(quote! { #[cfg(target_endian = "little")] }), + Endianness::Big => tokens.append_all(quote! { #[cfg(target_endian = "big")] }), + Endianness::NA => {} + }; + + /* If we have manually defined attributes on the block of yaml with + * 'attr:' we want to add them */ + if let Some(attr) = &intrinsic.attr { + /* Scan to see if we have defined `FnCall: [target_feature, ['']]`*/ + if !has_target_feature_attr(attr) { + /* If not add the default one that is defined at the top of + * the yaml file. This does mean we scan the attributes vector + * twice, once to see if the `target_feature` exists and again + * to actually append the tokens. We could impose that the + * `target_feature` call has to be the first argument of the + * `attr` block */ tokens.append_all(quote! { #[target_feature(enable = #target_feature)] }); } - if let Some(assert_instr) = &self.assert_instr { - if !assert_instr.is_empty() { - InstructionAssertionsForBaseType(&assert_instr, &self.base_type.as_ref()) - .to_tokens(tokens) - } + /* Target feature will get added here */ + let attr_expressions = &mut attr.iter().peekable(); + while let Some(ex) = attr_expressions.next() { + let mut inner = TokenStream::new(); + ex.to_tokens(&mut inner); + tokens.append(Punct::new('#', Spacing::Alone)); + tokens.append(Group::new(Delimiter::Bracket, inner)); } + } else { + tokens.append_all(quote! { + #[target_feature(enable = #target_feature)] + }); + } - match &self.visibility { - FunctionVisibility::Public => tokens.append_all(quote! { pub }), - FunctionVisibility::Private => {} - } - if safety.is_unsafe() { - tokens.append_all(quote! { unsafe }); + if let Some(assert_instr) = &intrinsic.assert_instr { + if !assert_instr.is_empty() { + InstructionAssertionsForBaseType(&assert_instr, &intrinsic.base_type.as_ref()) + .to_tokens(tokens) } - tokens.append_all(quote! { #signature }); + } - // If the intrinsic function is explicitly unsafe, we populate `body_default_safety` with - // the implementation. No explicit unsafe blocks are required. - // - // If the intrinsic is safe, we fill `body_default_safety` until we encounter an expression - // that requires an unsafe wrapper, then switch to `body_unsafe`. Since the unsafe - // operation (e.g. memory access) is typically the last step, this tends to minimises the - // amount of unsafe code required. - let mut body_default_safety = TokenStream::new(); - let mut body_unsafe = TokenStream::new(); - let mut body_current = &mut body_default_safety; - for (pos, ex) in self.compose.iter().with_position() { - if safety.is_safe() && ex.requires_unsafe_wrapper(&fn_name) { - body_current = &mut body_unsafe; - } - ex.to_tokens(body_current); - let is_last = matches!(pos, itertools::Position::Last | itertools::Position::Only); - let is_llvm_link = matches!(ex, Expression::LLVMLink(_)); - if !is_last && !is_llvm_link { - body_current.append(Punct::new(';', Spacing::Alone)); - } + match &intrinsic.visibility { + FunctionVisibility::Public => tokens.append_all(quote! { pub }), + FunctionVisibility::Private => {} + } + if safety.is_unsafe() { + tokens.append_all(quote! { unsafe }); + } + tokens.append_all(quote! { #signature }); + + let expressions = match endianness { + Endianness::Little | Endianness::NA => &intrinsic.compose, + Endianness::Big => &intrinsic.big_endian_compose, + }; + + tokens.append_all(quote! { #signature }); + + // If the intrinsic function is explicitly unsafe, we populate `body_default_safety` with + // the implementation. No explicit unsafe blocks are required. + // + // If the intrinsic is safe, we fill `body_default_safety` until we encounter an expression + // that requires an unsafe wrapper, then switch to `body_unsafe`. Since the unsafe + // operation (e.g. memory access) is typically the last step, this tends to minimises the + // amount of unsafe code required. + let mut body_default_safety = TokenStream::new(); + let mut body_unsafe = TokenStream::new(); + let mut body_current = &mut body_default_safety; + for (pos, ex) in expressions.iter().with_position() { + if safety.is_safe() && ex.requires_unsafe_wrapper(&fn_name) { + body_current = &mut body_unsafe; } - let mut body = body_default_safety; - if !body_unsafe.is_empty() { - body.append_all(quote! { unsafe { #body_unsafe } }); + ex.to_tokens(body_current); + let is_last = matches!(pos, itertools::Position::Last | itertools::Position::Only); + let is_llvm_link = matches!(ex, Expression::LLVMLink(_)); + if !is_last && !is_llvm_link { + body_current.append(Punct::new(';', Spacing::Alone)); } + } + let mut body = body_default_safety; + if !body_unsafe.is_empty() { + body.append_all(quote! { unsafe { #body_unsafe } }); + } - tokens.append(Group::new(Delimiter::Brace, body)); + tokens.append(Group::new(Delimiter::Brace, body)); +} + +impl ToTokens for Intrinsic { + fn to_tokens(&self, tokens: &mut TokenStream) { + if self.big_endian_compose.len() >= 1 { + for i in 0..2 { + match i { + 0 => create_tokens(self, Endianness::Little, tokens), + 1 => create_tokens(self, Endianness::Big, tokens), + _ => panic!("Currently only little and big endian exist"), + } + } + } else { + create_tokens(self, Endianness::NA, tokens); + } } } diff --git a/crates/stdarch-gen-arm/src/main.rs b/crates/stdarch-gen-arm/src/main.rs index c78e5dc4e4..9ea1917c14 100644 --- a/crates/stdarch-gen-arm/src/main.rs +++ b/crates/stdarch-gen-arm/src/main.rs @@ -1,6 +1,7 @@ #![feature(pattern)] mod assert_instr; +mod big_endian; mod context; mod expression; mod fn_suffix; From 3afbf22c4757103d02b481cd9167a999bb98598d Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Fri, 31 Jan 2025 11:15:05 +0000 Subject: [PATCH 02/13] Update YAML & generated files --- .../core_arch/src/aarch64/neon/generated.rs | 60071 +++++-- .../src/arm_shared/neon/generated.rs | 121589 ++++++++++++--- .../spec/neon/aarch64.spec.yml | 1848 +- .../spec/neon/arm_shared.spec.yml | 2060 +- 4 files changed, 147874 insertions(+), 37694 deletions(-) diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs index 90a70ee4d7..7f1f737328 100644 --- a/crates/core_arch/src/aarch64/neon/generated.rs +++ b/crates/core_arch/src/aarch64/neon/generated.rs @@ -1,9 +1,9 @@ // This code is automatically generated. DO NOT MODIFY. // -// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: // // ``` -// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec // ``` #![allow(improper_ctypes)] @@ -12,11 +12,52 @@ use stdarch_test::assert_instr; use super::*; +#[doc = "CRC32-C single round checksum for quad words (64 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "crc")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(crc32cx))] +#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] +pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crc32cx" + )] + fn ___crc32cd(crc: i32, data: i64) -> i32; + } + ___crc32cd(crc.as_signed(), data.as_signed()).as_unsigned() +} + +#[doc = "CRC32 single round checksum for quad words (64 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "crc")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(crc32x))] +#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] +pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crc32x" + )] + fn ___crc32d(crc: i32, data: i64) -> i32; + } + ___crc32d(crc.as_signed(), data.as_signed()).as_unsigned() +} + #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] @@ -27,11 +68,34 @@ pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 let f: uint8x8_t = simd_cast(f); simd_add(a, simd_cast(f)) } + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] +pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: int8x8_t = vabd_s8(d, e); + let f: uint8x8_t = simd_cast(f); + let ret_val: int16x8_t = simd_add(a, simd_cast(f)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] @@ -42,11 +106,34 @@ pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x let f: uint16x4_t = simd_cast(f); simd_add(a, simd_cast(f)) } + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] +pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + let f: int16x4_t = vabd_s16(d, e); + let f: uint16x4_t = simd_cast(f); + let ret_val: int32x4_t = simd_add(a, simd_cast(f)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] @@ -57,11 +144,34 @@ pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let f: uint32x2_t = simd_cast(f); simd_add(a, simd_cast(f)) } + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] +pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let d: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let e: int32x2_t = simd_shuffle!(c, c, [2, 3]); + let f: int32x2_t = vabd_s32(d, e); + let f: uint32x2_t = simd_cast(f); + let ret_val: int64x2_t = simd_add(a, simd_cast(f)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] @@ -71,11 +181,33 @@ pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint let f: uint8x8_t = vabd_u8(d, e); simd_add(a, simd_cast(f)) } + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] +pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: uint8x8_t = vabd_u8(d, e); + let ret_val: uint16x8_t = simd_add(a, simd_cast(f)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] @@ -85,11 +217,33 @@ pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin let f: uint16x4_t = vabd_u16(d, e); simd_add(a, simd_cast(f)) } + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] +pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + let f: uint16x4_t = vabd_u16(d, e); + let ret_val: uint32x4_t = simd_add(a, simd_cast(f)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] @@ -99,6 +253,27 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let f: uint32x2_t = vabd_u32(d, e); simd_add(a, simd_cast(f)) } + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] +pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]); + let f: uint32x2_t = vabd_u32(d, e); + let ret_val: uint64x2_t = simd_add(a, simd_cast(f)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"] #[doc = "## Safety"] @@ -108,7 +283,7 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fabd.v1f64" @@ -117,16 +292,18 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vabd_f64(a, b) } + #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fabd.v2f64" @@ -135,6 +312,30 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vabdq_f64(a, b) } + +#[doc = "Absolute difference between the arguments of Floating"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fabd))] +pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fabd.v2f64" + )] + fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vabdq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"] #[doc = "## Safety"] @@ -146,6 +347,7 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } + #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"] #[doc = "## Safety"] @@ -157,11 +359,13 @@ pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 { pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } + #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl))] @@ -171,11 +375,32 @@ pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let e: uint16x4_t = simd_cast(vabd_s16(c, d)); simd_cast(e) } + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sabdl))] +pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let e: uint16x4_t = simd_cast(vabd_s16(c, d)); + let ret_val: int32x4_t = simd_cast(e); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl))] @@ -185,11 +410,32 @@ pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let e: uint32x2_t = simd_cast(vabd_s32(c, d)); simd_cast(e) } + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sabdl))] +pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let e: uint32x2_t = simd_cast(vabd_s32(c, d)); + let ret_val: int64x2_t = simd_cast(e); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl))] @@ -199,11 +445,32 @@ pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let e: uint8x8_t = simd_cast(vabd_s8(c, d)); simd_cast(e) } + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sabdl))] +pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let e: uint8x8_t = simd_cast(vabd_s8(c, d)); + let ret_val: int16x8_t = simd_cast(e); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -212,11 +479,31 @@ pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); simd_cast(vabd_u8(c, d)) } + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uabdl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = simd_cast(vabd_u8(c, d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -225,11 +512,31 @@ pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); simd_cast(vabd_u16(c, d)) } + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uabdl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let ret_val: uint32x4_t = simd_cast(vabd_u16(c, d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -238,6 +545,25 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); simd_cast(vabd_u32(c, d)) } + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uabdl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let ret_val: uint64x2_t = simd_cast(vabd_u32(c, d)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"] #[doc = "## Safety"] @@ -249,30 +575,129 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t { simd_fabs(a) } + #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fabs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t { simd_fabs(a) } -#[doc = "Add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"] + +#[doc = "Floating-point absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fabs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { - a.wrapping_add(b) +pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_fabs(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"] + +#[doc = "Absolute Value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(abs))] +pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v1i64" + )] + fn _vabs_s64(a: int64x1_t) -> int64x1_t; + } + _vabs_s64(a) +} + +#[doc = "Absolute Value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(abs))] +pub unsafe fn vabsd_s64(a: i64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.i64" + )] + fn _vabsd_s64(a: i64) -> i64; + } + _vabsd_s64(a) +} + +#[doc = "Absolute Value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(abs))] +pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v2i64" + )] + fn _vabsq_s64(a: int64x2_t) -> int64x2_t; + } + _vabsq_s64(a) +} + +#[doc = "Absolute Value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(abs))] +pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v2i64" + )] + fn _vabsq_s64(a: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vabsq_s64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { + a.wrapping_add(b) +} + +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -282,70 +707,141 @@ pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { a.wrapping_add(b) } + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(saddlv))] +pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlv.i32.v4i16" + )] + fn _vaddlv_s16(a: int16x4_t) -> i32; + } + _vaddlv_s16(a) +} + #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { - unsafe extern "unadjusted" { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v4i16" )] fn _vaddlv_s16(a: int16x4_t) -> i32; } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlv_s16(a) } + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(saddlv))] +pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlv.i32.v8i16" + )] + fn _vaddlvq_s16(a: int16x8_t) -> i32; + } + _vaddlvq_s16(a) +} + #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { - unsafe extern "unadjusted" { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v8i16" )] fn _vaddlvq_s16(a: int16x8_t) -> i32; } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlvq_s16(a) } + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(saddlv))] +pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlv.i64.v4i32" + )] + fn _vaddlvq_s32(a: int32x4_t) -> i64; + } + _vaddlvq_s32(a) +} + #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { - unsafe extern "unadjusted" { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v4i32" )] fn _vaddlvq_s32(a: int32x4_t) -> i64; } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlvq_s32(a) } + #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlp))] pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { - unsafe extern "unadjusted" { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v2i32" @@ -354,5620 +850,12828 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { } _vaddlv_s32(a) } -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"] + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(saddlp))] +pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" + link_name = "llvm.aarch64.neon.saddlv.i64.v2i32" )] - fn _vaddlv_u16(a: int16x4_t) -> i32; + fn _vaddlv_s32(a: int32x2_t) -> i64; } - _vaddlv_u16(a.as_signed()).as_unsigned() + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddlv_s32(a) } -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(saddlv))] +pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" + link_name = "llvm.aarch64.neon.saddlv.i32.v8i8" )] - fn _vaddlvq_u16(a: int16x8_t) -> i32; + fn _vaddlv_s8(a: int8x8_t) -> i32; } - _vaddlvq_u16(a.as_signed()).as_unsigned() + _vaddlv_s8(a) as i16 } -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(saddlv))] +pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" + link_name = "llvm.aarch64.neon.saddlv.i32.v8i8" )] - fn _vaddlvq_u32(a: int32x4_t) -> i64; + fn _vaddlv_s8(a: int8x8_t) -> i32; } - _vaddlvq_u32(a.as_signed()).as_unsigned() + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vaddlv_s8(a) as i16 } -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlp))] -pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(saddlv))] +pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" + link_name = "llvm.aarch64.neon.saddlv.i32.v16i8" )] - fn _vaddlv_u32(a: int32x2_t) -> i64; + fn _vaddlvq_s8(a: int8x16_t) -> i32; } - _vaddlv_u32(a.as_signed()).as_unsigned() + _vaddlvq_s8(a) as i16 } -#[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] + +#[doc = "Signed Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(saddlv))] +pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f32.v2f32" + link_name = "llvm.aarch64.neon.saddlv.i32.v16i8" )] - fn _vaddv_f32(a: float32x2_t) -> f32; + fn _vaddlvq_s8(a: int8x16_t) -> i32; } - _vaddv_f32(a) + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vaddlvq_s8(a) as i16 } -#[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f32.v4f32" + link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" )] - fn _vaddvq_f32(a: float32x4_t) -> f32; + fn _vaddlv_u16(a: int16x4_t) -> i32; } - _vaddvq_f32(a) + _vaddlv_u16(a.as_signed()).as_unsigned() } -#[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f64.v2f64" + link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" )] - fn _vaddvq_f64(a: float64x2_t) -> f64; + fn _vaddlv_u16(a: int16x4_t) -> i32; } - _vaddvq_f64(a) + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vaddlv_u16(a.as_signed()).as_unsigned() } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v16i8" + link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" )] - fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + fn _vaddlvq_u16(a: int16x8_t) -> i32; } - _vbcaxq_s8(a, b, c) + _vaddlvq_u16(a.as_signed()).as_unsigned() } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v8i16" + link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" )] - fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + fn _vaddlvq_u16(a: int16x8_t) -> i32; } - _vbcaxq_s16(a, b, c) + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vaddlvq_u16(a.as_signed()).as_unsigned() } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v4i32" + link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" )] - fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vaddlvq_u32(a: int32x4_t) -> i64; } - _vbcaxq_s32(a, b, c) + _vaddlvq_u32(a.as_signed()).as_unsigned() } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v2i64" + link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" )] - fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + fn _vaddlvq_u32(a: int32x4_t) -> i64; } - _vbcaxq_s64(a, b, c) + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vaddlvq_u32(a.as_signed()).as_unsigned() } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlp))] +pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v16i8" + link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" )] - fn _vbcaxq_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + fn _vaddlv_u32(a: int32x2_t) -> i64; } - _vbcaxq_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vaddlv_u32(a.as_signed()).as_unsigned() } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlp))] +pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v8i16" + link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" )] - fn _vbcaxq_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + fn _vaddlv_u32(a: int32x2_t) -> i64; } - _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddlv_u32(a.as_signed()).as_unsigned() } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v4i32" + link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8" )] - fn _vbcaxq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vaddlv_u8(a: int8x8_t) -> i32; } - _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vaddlv_u8(a.as_signed()).as_unsigned() as u16 } -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v2i64" + link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8" )] - fn _vbcaxq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + fn _vaddlv_u8(a: int8x8_t) -> i32; } - _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vaddlv_u8(a.as_signed()).as_unsigned() as u16 } -#[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32" + link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8" )] - fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vaddlvq_u8(a: int8x16_t) -> i32; } - _vcadd_rot270_f32(a, b) + _vaddlvq_u8(a.as_signed()).as_unsigned() as u16 } -#[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] + +#[doc = "Unsigned Add Long across Vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uaddlv))] +pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" + link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8" )] - fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + fn _vaddlvq_u8(a: int8x16_t) -> i32; } - _vcaddq_rot270_f32(a, b) + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vaddlvq_u8(a.as_signed()).as_unsigned() as u16 } -#[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] + +#[doc = "Floating-point add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" + link_name = "llvm.aarch64.neon.faddv.f32.v2f32" )] - fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vaddv_f32(a: float32x2_t) -> f32; } - _vcaddq_rot270_f64(a, b) + _vaddv_f32(a) } -#[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] + +#[doc = "Floating-point add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" + link_name = "llvm.aarch64.neon.faddv.f32.v2f32" )] - fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vaddv_f32(a: float32x2_t) -> f32; } - _vcadd_rot90_f32(a, b) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddv_f32(a) } -#[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] + +#[doc = "Floating-point add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" + link_name = "llvm.aarch64.neon.faddv.f32.v4f32" )] - fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + fn _vaddvq_f32(a: float32x4_t) -> f32; } - _vcaddq_rot90_f32(a, b) + _vaddvq_f32(a) } -#[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] + +#[doc = "Floating-point add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" + link_name = "llvm.aarch64.neon.faddv.f32.v4f32" )] - fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vaddvq_f32(a: float32x4_t) -> f32; } - _vcaddq_rot90_f64(a, b) + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vaddvq_f32(a) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"] + +#[doc = "Floating-point add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v1i64.v1f64" + link_name = "llvm.aarch64.neon.faddv.f64.v2f64" )] - fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> int64x1_t; + fn _vaddvq_f64(a: float64x2_t) -> f64; } - _vcage_f64(a, b).as_unsigned() + _vaddvq_f64(a) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] + +#[doc = "Floating-point add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" + link_name = "llvm.aarch64.neon.faddv.f64.v2f64" )] - fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + fn _vaddvq_f64(a: float64x2_t) -> f64; } - _vcageq_f64(a, b).as_unsigned() + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddvq_f64(a) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.i64.f64" + link_name = "llvm.aarch64.neon.saddv.i32.v2i32" )] - fn _vcaged_f64(a: f64, b: f64) -> i64; + fn _vaddv_s32(a: int32x2_t) -> i32; } - _vcaged_f64(a, b).as_unsigned() + _vaddv_s32(a) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.i32.f32" + link_name = "llvm.aarch64.neon.saddv.i32.v2i32" )] - fn _vcages_f32(a: f32, b: f32) -> i32; + fn _vaddv_s32(a: int32x2_t) -> i32; } - _vcages_f32(a, b).as_unsigned() + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddv_s32(a) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64" + link_name = "llvm.aarch64.neon.saddv.i32.v8i8" )] - fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> int64x1_t; + fn _vaddv_s8(a: int8x8_t) -> i8; } - _vcagt_f64(a, b).as_unsigned() + _vaddv_s8(a) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" + link_name = "llvm.aarch64.neon.saddv.i32.v8i8" )] - fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + fn _vaddv_s8(a: int8x8_t) -> i8; } - _vcagtq_f64(a, b).as_unsigned() + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vaddv_s8(a) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.i64.f64" + link_name = "llvm.aarch64.neon.saddv.i32.v16i8" )] - fn _vcagtd_f64(a: f64, b: f64) -> i64; + fn _vaddvq_s8(a: int8x16_t) -> i8; } - _vcagtd_f64(a, b).as_unsigned() + _vaddvq_s8(a) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.i32.f32" + link_name = "llvm.aarch64.neon.saddv.i32.v16i8" )] - fn _vcagts_f32(a: f32, b: f32) -> i32; + fn _vaddvq_s8(a: int8x16_t) -> i8; } - _vcagts_f32(a, b).as_unsigned() + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vaddvq_s8(a) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - vcage_f64(b, a) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i32.v4i16" + )] + fn _vaddv_s16(a: int16x4_t) -> i16; + } + _vaddv_s16(a) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - vcageq_f64(b, a) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i32.v4i16" + )] + fn _vaddv_s16(a: int16x4_t) -> i16; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vaddv_s16(a) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 { - vcaged_f64(b, a) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i32.v8i16" + )] + fn _vaddvq_s16(a: int16x8_t) -> i16; + } + _vaddvq_s16(a) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 { - vcages_f32(b, a) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i32.v8i16" + )] + fn _vaddvq_s16(a: int16x8_t) -> i16; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vaddvq_s16(a) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - vcagt_f64(b, a) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i32.v4i32" + )] + fn _vaddvq_s32(a: int32x4_t) -> i32; + } + _vaddvq_s32(a) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - vcagtq_f64(b, a) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i32.v4i32" + )] + fn _vaddvq_s32(a: int32x4_t) -> i32; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vaddvq_s32(a) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 { - vcagtd_f64(b, a) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v2i32" + )] + fn _vaddv_u32(a: int32x2_t) -> i32; + } + _vaddv_u32(a.as_signed()).as_unsigned() } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 { - vcagts_f32(b, a) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v2i32" + )] + fn _vaddv_u32(a: int32x2_t) -> i32; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddv_u32(a.as_signed()).as_unsigned() } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v8i8" + )] + fn _vaddv_u8(a: int8x8_t) -> i8; + } + _vaddv_u8(a.as_signed()).as_unsigned() } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v8i8" + )] + fn _vaddv_u8(a: int8x8_t) -> i8; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vaddv_u8(a.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v16i8" + )] + fn _vaddvq_u8(a: int8x16_t) -> i8; + } + _vaddvq_u8(a.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v16i8" + )] + fn _vaddvq_u8(a: int8x16_t) -> i8; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vaddvq_u8(a.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v4i16" + )] + fn _vaddv_u16(a: int16x4_t) -> i16; + } + _vaddv_u16(a.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v4i16" + )] + fn _vaddv_u16(a: int16x4_t) -> i16; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vaddv_u16(a.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v8i16" + )] + fn _vaddvq_u16(a: int16x8_t) -> i16; + } + _vaddvq_u16(a.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { - simd_eq(a, b) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v8i16" + )] + fn _vaddvq_u16(a: int16x8_t) -> i16; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vaddvq_u16(a.as_signed()).as_unsigned() } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 { - simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v4i32" + )] + fn _vaddvq_u32(a: int32x4_t) -> i32; + } + _vaddvq_u32(a.as_signed()).as_unsigned() } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 { - simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) +#[cfg_attr(test, assert_instr(addv))] +pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i32.v4i32" + )] + fn _vaddvq_u32(a: int32x4_t) -> i32; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vaddvq_u32(a.as_signed()).as_unsigned() } -#[doc = "Compare bitwise equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 { - transmute(vceq_s64(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i64.v2i64" + )] + fn _vaddvq_s64(a: int64x2_t) -> i64; + } + _vaddvq_s64(a) } -#[doc = "Compare bitwise equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 { - transmute(vceq_u64(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddv.i64.v2i64" + )] + fn _vaddvq_s64(a: int64x2_t) -> i64; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddvq_s64(a) } -#[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t { - let b: f32x2 = f32x2::new(0.0, 0.0); - simd_eq(a, transmute(b)) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i64.v2i64" + )] + fn _vaddvq_u64(a: int64x2_t) -> i64; + } + _vaddvq_u64(a.as_signed()).as_unsigned() } -#[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] + +#[doc = "Add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - simd_eq(a, transmute(b)) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddv.i64.v2i64" + )] + fn _vaddvq_u64(a: int64x2_t) -> i64; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + _vaddvq_u64(a.as_signed()).as_unsigned() } -#[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t { - let b: f64 = 0.0; - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v16i8" + )] + fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _vbcaxq_s8(a, b, c) } -#[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { - let b: f64x2 = f64x2::new(0.0, 0.0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v16i8" + )] + fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vbcaxq_s8(a, b, c); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t { - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v8i16" + )] + fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _vbcaxq_s16(a, b, c) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v8i16" + )] + fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vbcaxq_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t { - let b: i16x4 = i16x4::new(0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v4i32" + )] + fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vbcaxq_s32(a, b, c) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v4i32" + )] + fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vbcaxq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t { - let b: i32x2 = i32x2::new(0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v2i64" + )] + fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _vbcaxq_s64(a, b, c) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t { - let b: i32x4 = i32x4::new(0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxs.v2i64" + )] + fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = _vbcaxq_s64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t { - let b: i64x1 = i64x1::new(0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v16i8" + )] + fn _vbcaxq_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _vbcaxq_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { - let b: i64x2 = i64x2::new(0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v16i8" + )] + fn _vbcaxq_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vbcaxq_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v8i16" + )] + fn _vbcaxq_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) -} -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t { - let b: i64x1 = i64x1::new(0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v8i16" + )] + fn _vbcaxq_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = + _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { - let b: i64x2 = i64x2::new(0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v4i32" + )] + fn _vbcaxq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { - let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v4i32" + )] + fn _vbcaxq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { - let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v2i64" + )] + fn _vbcaxq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] + +#[doc = "Bit clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { - let b: u16x4 = u16x4::new(0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(bcax))] +pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.bcaxu.v2i64" + )] + fn _vbcaxq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = + _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { - let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32" + )] + fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vcadd_rot270_f32(a, b) } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { - let b: u32x2 = u32x2::new(0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32" + )] + fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vcadd_rot270_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { - let b: u32x4 = u32x4::new(0, 0, 0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" + )] + fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vcaddq_rot270_f32(a, b) } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t { - let b: u64x1 = u64x1::new(0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" + )] + fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcaddq_rot270_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { - let b: u64x2 = u64x2::new(0, 0); - simd_eq(a, transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" + )] + fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vcaddq_rot270_f64(a, b) } -#[doc = "Compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzd_s64(a: i64) -> u64 { - transmute(vceqz_s64(transmute(a))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" + )] + fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vcaddq_rot270_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzd_u64(a: u64) -> u64 { - transmute(vceqz_u64(transmute(a))) -} -#[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" + )] + fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vcadd_rot90_f32(a, b) +} + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzs_f32(a: f32) -> u32 { - simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" + )] + fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vcadd_rot90_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzd_f64(a: f64) -> u64 { - simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" + )] + fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vcaddq_rot90_f32(a, b) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - simd_ge(a, b) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" + )] + fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcaddq_rot90_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - simd_ge(a, b) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" + )] + fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vcaddq_rot90_f64(a, b) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"] + +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" + )] + fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vcaddq_rot90_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - simd_ge(a, b) +pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v1i64.v1f64" + )] + fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> int64x1_t; + } + _vcage_f64(a, b).as_unsigned() } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - simd_ge(a, b) +pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" + )] + fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + } + _vcageq_f64(a, b).as_unsigned() } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhs))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_ge(a, b) +pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" + )] + fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vcageq_f64(a, b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhs))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_ge(a, b) +pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.i64.f64" + )] + fn _vcaged_f64(a: f64, b: f64) -> i64; + } + _vcaged_f64(a, b).as_unsigned() } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 { - simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) +pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.i32.f32" + )] + fn _vcages_f32(a: f32, b: f32) -> i32; + } + _vcages_f32(a, b).as_unsigned() } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 { - simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) +pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64" + )] + fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> int64x1_t; + } + _vcagt_f64(a, b).as_unsigned() } -#[doc = "Compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 { - transmute(vcge_s64(transmute(a), transmute(b))) +pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" + )] + fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + } + _vcagtq_f64(a, b).as_unsigned() } -#[doc = "Compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 { - transmute(vcge_u64(transmute(a), transmute(b))) +pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" + )] + fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vcagtq_f64(a, b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t { - let b: f32x2 = f32x2::new(0.0, 0.0); - simd_ge(a, transmute(b)) +pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.i64.f64" + )] + fn _vcagtd_f64(a: f64, b: f64) -> i64; + } + _vcagtd_f64(a, b).as_unsigned() } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - simd_ge(a, transmute(b)) +pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.i32.f32" + )] + fn _vcagts_f32(a: f32, b: f32) -> i32; + } + _vcagts_f32(a, b).as_unsigned() } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t { - let b: f64 = 0.0; - simd_ge(a, transmute(b)) +pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + vcage_f64(b, a) } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { - let b: f64x2 = f64x2::new(0.0, 0.0); - simd_ge(a, transmute(b)) +pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + vcageq_f64(b, a) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t { - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ge(a, transmute(b)) +pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = vcageq_f64(b, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ge(a, transmute(b)) +pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 { + vcaged_f64(b, a) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t { - let b: i16x4 = i16x4::new(0, 0, 0, 0); - simd_ge(a, transmute(b)) +pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 { + vcages_f32(b, a) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ge(a, transmute(b)) +pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + vcagt_f64(b, a) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t { - let b: i32x2 = i32x2::new(0, 0); - simd_ge(a, transmute(b)) +pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + vcagtq_f64(b, a) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { - let b: i32x4 = i32x4::new(0, 0, 0, 0); - simd_ge(a, transmute(b)) +pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = vcagtq_f64(b, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t { - let b: i64x1 = i64x1::new(0); - simd_ge(a, transmute(b)) +pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 { + vcagtd_f64(b, a) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { - let b: i64x2 = i64x2::new(0, 0); - simd_ge(a, transmute(b)) +pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 { + vcagts_f32(b, a) } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezd_f64(a: f64) -> u64 { - simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) +pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + simd_eq(a, b) } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezs_f32(a: f32) -> u32 { - simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) +pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + simd_eq(a, b) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezd_s64(a: i64) -> u64 { - transmute(vcgez_s64(transmute(a))) +pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - simd_gt(a, b) +pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + simd_eq(a, b) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - simd_gt(a, b) +pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + simd_eq(a, b) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - simd_gt(a, b) +pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - simd_gt(a, b) +pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_eq(a, b) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhi))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_gt(a, b) +pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_eq(a, b) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhi))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_gt(a, b) +pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 { - simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) +pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { + simd_eq(a, b) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 { - simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) +pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { + simd_eq(a, b) } -#[doc = "Compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 { - transmute(vcgt_s64(transmute(a), transmute(b))) +pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 { - transmute(vcgt_u64(transmute(a), transmute(b))) +pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 { + simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { - let b: f32x2 = f32x2::new(0.0, 0.0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 { + simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] + +#[doc = "Compare bitwise equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 { + transmute(vceq_s64(transmute(a), transmute(b))) } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"] + +#[doc = "Compare bitwise equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t { - let b: f64 = 0.0; - simd_gt(a, transmute(b)) +pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 { + transmute(vceq_u64(transmute(a), transmute(b))) } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { - let b: f64x2 = f64x2::new(0.0, 0.0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t { + let b: f32x2 = f32x2::new(0.0, 0.0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f32x2 = f32x2::new(0.0, 0.0); + let ret_val: uint32x2_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { - let b: i16x4 = i16x4::new(0, 0, 0, 0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + let ret_val: uint32x4_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t { + let b: f64 = 0.0; + simd_eq(a, transmute(b)) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { - let b: i32x2 = i32x2::new(0, 0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { + let b: f64x2 = f64x2::new(0.0, 0.0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { - let b: i32x4 = i32x4::new(0, 0, 0, 0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f64x2 = f64x2::new(0.0, 0.0); + let ret_val: uint64x2_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t { - let b: i64x1 = i64x1::new(0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t { + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { - let b: i64x2 = i64x2::new(0, 0); - simd_gt(a, transmute(b)) +pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzd_f64(a: f64) -> u64 { - simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) +pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzs_f32(a: f32) -> u32 { - simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) +pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_eq(a, transmute(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzd_s64(a: i64) -> u64 { - transmute(vcgtz_s64(transmute(a))) +pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t { + let b: i16x4 = i16x4::new(0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - simd_le(a, b) +pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i16x4 = i16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - simd_le(a, b) +pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - simd_le(a, b) +pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - simd_le(a, b) +pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t { + let b: i32x2 = i32x2::new(0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhs))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_le(a, b) +pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i32x2 = i32x2::new(0, 0); + let ret_val: uint32x2_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhs))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_le(a, b) +pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t { + let b: i32x4 = i32x4::new(0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 { - simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) +pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i32x4 = i32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 { - simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) +pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t { + let b: i64x1 = i64x1::new(0); + simd_eq(a, transmute(b)) } -#[doc = "Compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 { - transmute(vcle_u64(transmute(a), transmute(b))) +pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { + let b: i64x2 = i64x2::new(0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 { - transmute(vcle_s64(transmute(a), transmute(b))) +pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t { - let b: f32x2 = f32x2::new(0.0, 0.0); - simd_le(a, transmute(b)) +pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t { - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - simd_le(a, transmute(b)) +pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t { - let b: f64 = 0.0; - simd_le(a, transmute(b)) +pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t { - let b: f64x2 = f64x2::new(0.0, 0.0); - simd_le(a, transmute(b)) +pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_eq(a, transmute(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t { - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_le(a, transmute(b)) +pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t { + let b: i64x1 = i64x1::new(0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t { - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_le(a, transmute(b)) +pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { + let b: i64x2 = i64x2::new(0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] + +#[doc = "Signed compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t { - let b: i16x4 = i16x4::new(0, 0, 0, 0); - simd_le(a, transmute(b)) +pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t { - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_le(a, transmute(b)) +pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { + let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t { - let b: i32x2 = i32x2::new(0, 0); - simd_le(a, transmute(b)) +pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t { - let b: i32x4 = i32x4::new(0, 0, 0, 0); - simd_le(a, transmute(b)) +pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { + let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t { - let b: i64x1 = i64x1::new(0); - simd_le(a, transmute(b)) +pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_eq(a, transmute(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { - let b: i64x2 = i64x2::new(0, 0); - simd_le(a, transmute(b)) +pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { + let b: u16x4 = u16x4::new(0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezd_f64(a: f64) -> u64 { - simd_extract!(vclez_f64(vdup_n_f64(a)), 0) +pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: u16x4 = u16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezs_f32(a: f32) -> u32 { - simd_extract!(vclez_f32(vdup_n_f32(a)), 0) +pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { + let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezd_s64(a: i64) -> u64 { - transmute(vclez_s64(transmute(a))) +pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - simd_lt(a, b) +pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { + let b: u32x2 = u32x2::new(0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - simd_lt(a, b) +pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: u32x2 = u32x2::new(0, 0); + let ret_val: uint32x2_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - simd_lt(a, b) +pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { + let b: u32x4 = u32x4::new(0, 0, 0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - simd_lt(a, b) +pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: u32x4 = u32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhi))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_lt(a, b) +pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t { + let b: u64x1 = u64x1::new(0); + simd_eq(a, transmute(b)) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhi))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_lt(a, b) +pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { + let b: u64x2 = u64x2::new(0, 0); + simd_eq(a, transmute(b)) } -#[doc = "Compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"] + +#[doc = "Unsigned compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmp))] +#[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 { - transmute(vclt_u64(transmute(a), transmute(b))) +pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: u64x2 = u64x2::new(0, 0); + let ret_val: uint64x2_t = simd_eq(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"] + +#[doc = "Compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 { - transmute(vclt_s64(transmute(a), transmute(b))) +pub unsafe fn vceqzd_s64(a: i64) -> u64 { + transmute(vceqz_s64(transmute(a))) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"] + +#[doc = "Compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 { - simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) +pub unsafe fn vceqzd_u64(a: u64) -> u64 { + transmute(vceqz_u64(transmute(a))) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 { - simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) +pub unsafe fn vceqzs_f32(a: f32) -> u32 { + simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) } -#[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"] + +#[doc = "Floating-point compare bitwise equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmlt))] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { - let b: f32x2 = f32x2::new(0.0, 0.0); - simd_lt(a, transmute(b)) +pub unsafe fn vceqzd_f64(a: f64) -> u64 { + simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) } -#[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmlt))] +#[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - simd_lt(a, transmute(b)) +pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + simd_ge(a, b) } -#[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmlt))] +#[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t { - let b: f64 = 0.0; - simd_lt(a, transmute(b)) +pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + simd_ge(a, b) } -#[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmlt))] +#[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { - let b: f64x2 = f64x2::new(0.0, 0.0); - simd_lt(a, transmute(b)) +pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t { - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_lt(a, transmute(b)) +pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + simd_ge(a, b) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_lt(a, transmute(b)) +pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + simd_ge(a, b) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { - let b: i16x4 = i16x4::new(0, 0, 0, 0); - simd_lt(a, transmute(b)) +pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_lt(a, transmute(b)) +pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_ge(a, b) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { - let b: i32x2 = i32x2::new(0, 0); - simd_lt(a, transmute(b)) +pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_ge(a, b) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { - let b: i32x4 = i32x4::new(0, 0, 0, 0); - simd_lt(a, transmute(b)) +pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t { - let b: i64x1 = i64x1::new(0); - simd_lt(a, transmute(b)) +pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 { + simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { - let b: i64x2 = i64x2::new(0, 0); - simd_lt(a, transmute(b)) +pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 { + simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } -#[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"] + +#[doc = "Compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzd_f64(a: f64) -> u64 { - simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) +pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 { + transmute(vcge_s64(transmute(a), transmute(b))) } -#[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"] + +#[doc = "Compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmp))] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzs_f32(a: f32) -> u32 { - simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) +pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 { + transmute(vcge_u64(transmute(a), transmute(b))) } -#[doc = "Compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(asr))] +#[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzd_s64(a: i64) -> u64 { - transmute(vcltz_s64(transmute(a))) +pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t { + let b: f32x2 = f32x2::new(0.0, 0.0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" - )] - fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - _vcmla_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f32x2 = f32x2::new(0.0, 0.0); + let ret_val: uint32x2_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" - )] - fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - _vcmlaq_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" - )] - fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - _vcmlaq_f64(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + let ret_val: uint32x4_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_f32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t { + let b: f64 = 0.0; + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { + let b: f64x2 = f64x2::new(0.0, 0.0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f64x2 = f64x2::new(0.0, 0.0); + let ret_val: uint64x2_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t { + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" - )] - fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - _vcmla_rot180_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" - )] - fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - _vcmlaq_rot180_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" - )] - fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - _vcmlaq_rot180_f64(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_ge(a, transmute(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot180_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_rot180_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t { + let b: i16x4 = i16x4::new(0, 0, 0, 0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot180_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_rot180_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i16x4 = i16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot180_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_rot180_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot180_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_rot180_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" - )] - fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - _vcmla_rot270_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t { + let b: i32x2 = i32x2::new(0, 0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" - )] - fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - _vcmlaq_rot270_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i32x2 = i32x2::new(0, 0); + let ret_val: uint32x2_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" - )] - fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - _vcmlaq_rot270_f64(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { + let b: i32x4 = i32x4::new(0, 0, 0, 0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot270_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_rot270_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i32x4 = i32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot270_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_rot270_f32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t { + let b: i64x1 = i64x1::new(0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot270_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_rot270_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { + let b: i64x2 = i64x2::new(0, 0); + simd_ge(a, transmute(b)) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot270_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_rot270_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_ge(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32" - )] - fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - _vcmla_rot90_f32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezd_f64(a: f64) -> u64 { + simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] + +#[doc = "Floating-point compare greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32" - )] - fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - _vcmlaq_rot90_f32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezs_f32(a: f32) -> u32 { + simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] + +#[doc = "Compare signed greater than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64" - )] - fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - _vcmlaq_rot90_f64(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgezd_s64(a: i64) -> u64 { + transmute(vcgez_s64(transmute(a))) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot90_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_rot90_f32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + simd_gt(a, b) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot90_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_rot90_f32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + simd_gt(a, b) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot90_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - vcmla_rot90_f32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot90_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - vcmlaq_rot90_f32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + simd_gt(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_f32( - a: float32x2_t, - b: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + simd_gt(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_s8( - a: int8x8_t, - b: int8x8_t, -) -> int8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s8( - a: int8x16_t, - b: int8x8_t, -) -> int8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 3); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_gt(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_s16( - a: int16x4_t, - b: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_gt(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s16( - a: int16x8_t, - b: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_s32( - a: int32x2_t, - b: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 { + simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s32( - a: int32x4_t, - b: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 1); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 { + simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] + +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_u8( - a: uint8x8_t, - b: uint8x8_t, -) -> uint8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 { + transmute(vcgt_s64(transmute(a), transmute(b))) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] + +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u8( - a: uint8x16_t, - b: uint8x8_t, -) -> uint8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 3); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 { + transmute(vcgt_u64(transmute(a), transmute(b))) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_u16( - a: uint16x4_t, - b: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { + let b: f32x2 = f32x2::new(0.0, 0.0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u16( - a: uint16x8_t, - b: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f32x2 = f32x2::new(0.0, 0.0); + let ret_val: uint32x2_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_u32( - a: uint32x2_t, - b: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u32( - a: uint32x4_t, - b: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 1); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + let ret_val: uint32x4_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_p8( - a: poly8x8_t, - b: poly8x8_t, -) -> poly8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t { + let b: f64 = 0.0; + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p8( - a: poly8x16_t, - b: poly8x8_t, -) -> poly8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 3); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { + let b: f64x2 = f64x2::new(0.0, 0.0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_p16( - a: poly16x4_t, - b: poly16x4_t, -) -> poly16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f64x2 = f64x2::new(0.0, 0.0); + let ret_val: uint64x2_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p16( - a: poly16x8_t, - b: poly16x4_t, -) -> poly16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_f32( - a: float32x2_t, - b: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_f32( - a: float32x4_t, - b: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_f64( - a: float64x2_t, - b: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_gt(a, transmute(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_s8( - a: int8x8_t, - b: int8x16_t, -) -> int8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 4); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { + let b: i16x4 = i16x4::new(0, 0, 0, 0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s8( - a: int8x16_t, - b: int8x16_t, -) -> int8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i16x4 = i16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_s16( - a: int16x4_t, - b: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s16( - a: int16x8_t, - b: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_s32( - a: int32x2_t, - b: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { + let b: i32x2 = i32x2::new(0, 0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s32( - a: int32x4_t, - b: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i32x2 = i32x2::new(0, 0); + let ret_val: uint32x2_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s64( - a: int64x2_t, - b: int64x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { + let b: i32x4 = i32x4::new(0, 0, 0, 0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_u8( - a: uint8x8_t, - b: uint8x16_t, -) -> uint8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 4); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i32x4 = i32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u8( - a: uint8x16_t, - b: uint8x16_t, -) -> uint8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t { + let b: i64x1 = i64x1::new(0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_u16( - a: uint16x4_t, - b: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { + let b: i64x2 = i64x2::new(0, 0); + simd_gt(a, transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_gt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_u32( - a: uint32x2_t, - b: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzd_f64(a: f64) -> u64 { + simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] + +#[doc = "Floating-point compare greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzs_f32(a: f32) -> u32 { + simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] + +#[doc = "Compare signed greater than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u64( - a: uint64x2_t, - b: uint64x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcgtzd_s64(a: i64) -> u64 { + transmute(vcgtz_s64(transmute(a))) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_p8( - a: poly8x8_t, - b: poly8x16_t, -) -> poly8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 4); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + simd_le(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p8( - a: poly8x16_t, - b: poly8x16_t, -) -> poly8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, +pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + simd_le(a, b) +} + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + simd_le(a, b) +} + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + simd_le(a, b) +} + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmge))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmhs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_le(a, b) +} + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmhs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_le(a, b) +} + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmhs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 { + simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) +} + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 { + simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) +} + +#[doc = "Compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 { + transmute(vcle_u64(transmute(a), transmute(b))) +} + +#[doc = "Compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 { + transmute(vcle_s64(transmute(a), transmute(b))) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t { + let b: f32x2 = f32x2::new(0.0, 0.0); + simd_le(a, transmute(b)) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f32x2 = f32x2::new(0.0, 0.0); + let ret_val: uint32x2_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t { + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + simd_le(a, transmute(b)) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + let ret_val: uint32x4_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t { + let b: f64 = 0.0; + simd_le(a, transmute(b)) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t { + let b: f64x2 = f64x2::new(0.0, 0.0); + simd_le(a, transmute(b)) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f64x2 = f64x2::new(0.0, 0.0); + let ret_val: uint64x2_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t { + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t { + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_le(a, transmute(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t { + let b: i16x4 = i16x4::new(0, 0, 0, 0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i16x4 = i16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t { + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t { + let b: i32x2 = i32x2::new(0, 0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i32x2 = i32x2::new(0, 0); + let ret_val: uint32x2_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t { + let b: i32x4 = i32x4::new(0, 0, 0, 0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i32x4 = i32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t { + let b: i64x1 = i64x1::new(0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { + let b: i64x2 = i64x2::new(0, 0); + simd_le(a, transmute(b)) +} + +#[doc = "Compare signed less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmle))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_le(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezd_f64(a: f64) -> u64 { + simd_extract!(vclez_f64(vdup_n_f64(a)), 0) +} + +#[doc = "Floating-point compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezs_f32(a: f32) -> u32 { + simd_extract!(vclez_f32(vdup_n_f32(a)), 0) +} + +#[doc = "Compare less than or equal to zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclezd_s64(a: i64) -> u64 { + transmute(vclez_s64(transmute(a))) +} + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { + simd_lt(a, b) +} + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + simd_lt(a, b) +} + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + simd_lt(a, b) +} + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + simd_lt(a, b) +} + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmgt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmhi))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_lt(a, b) +} + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmhi))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_lt(a, b) +} + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmhi))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 { + transmute(vclt_u64(transmute(a), transmute(b))) +} + +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 { + transmute(vclt_s64(transmute(a), transmute(b))) +} + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 { + simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) +} + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 { + simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { + let b: f32x2 = f32x2::new(0.0, 0.0); + simd_lt(a, transmute(b)) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f32x2 = f32x2::new(0.0, 0.0); + let ret_val: uint32x2_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + simd_lt(a, transmute(b)) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); + let ret_val: uint32x4_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t { + let b: f64 = 0.0; + simd_lt(a, transmute(b)) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { + let b: f64x2 = f64x2::new(0.0, 0.0); + simd_lt(a, transmute(b)) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: f64x2 = f64x2::new(0.0, 0.0); + let ret_val: uint64x2_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t { + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_lt(a, transmute(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { + let b: i16x4 = i16x4::new(0, 0, 0, 0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i16x4 = i16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { + let b: i32x2 = i32x2::new(0, 0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i32x2 = i32x2::new(0, 0); + let ret_val: uint32x2_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { + let b: i32x4 = i32x4::new(0, 0, 0, 0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: i32x4 = i32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t { + let b: i64x1 = i64x1::new(0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { + let b: i64x2 = i64x2::new(0, 0); + simd_lt(a, transmute(b)) +} + +#[doc = "Compare signed less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmlt))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_lt(a, transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzd_f64(a: f64) -> u64 { + simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) +} + +#[doc = "Floating-point compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzs_f32(a: f32) -> u32 { + simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) +} + +#[doc = "Compare less than zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(asr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcltzd_s64(a: i64) -> u64 { + transmute(vcltz_s64(transmute(a))) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" + )] + fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + _vcmla_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" + )] + fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = _vcmla_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" + )] + fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + _vcmlaq_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" + )] + fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcmlaq_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" + )] + fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + _vcmlaq_f64(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" + )] + fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = _vcmlaq_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" + )] + fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + _vcmla_rot180_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" + )] + fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = _vcmla_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" + )] + fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + _vcmlaq_rot180_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" + )] + fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcmlaq_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" + )] + fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + _vcmlaq_rot180_f64(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" + )] + fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = _vcmlaq_rot180_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot180_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_rot180_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot180_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot180_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_rot180_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot180_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot180_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_rot180_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot180_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot180_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_rot180_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot180_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" + )] + fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + _vcmla_rot270_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" + )] + fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = _vcmla_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" + )] + fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + _vcmlaq_rot270_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" + )] + fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcmlaq_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" + )] + fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + _vcmlaq_rot270_f64(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" + )] + fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = _vcmlaq_rot270_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot270_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_rot270_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot270_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot270_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_rot270_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot270_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot270_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_rot270_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot270_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot270_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_rot270_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot270_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32" + )] + fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + _vcmla_rot90_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32" + )] + fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = _vcmla_rot90_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32" + )] + fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + _vcmlaq_rot90_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32" + )] + fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcmlaq_rot90_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64" + )] + fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + _vcmlaq_rot90_f64(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64" + )] + fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = _vcmlaq_rot90_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot90_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_rot90_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot90_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert!(LANE == 0); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_rot90_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot90_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_rot90_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot90_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert!(LANE == 0); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_rot90_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot90_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + vcmla_rot90_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmla_rot90_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); + let ret_val: float32x2_t = vcmla_rot90_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot90_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + vcmlaq_rot90_f32(a, b, c) +} + +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +pub unsafe fn vcmlaq_rot90_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!( + c, + c, + [ + 2 * LANE as u32, + 2 * LANE as u32 + 1, + 2 * LANE as u32, + 2 * LANE as u32 + 1 + ] + ); + let ret_val: float32x4_t = vcmlaq_rot90_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_f32( + a: float32x2_t, + b: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_f32( + a: float32x2_t, + b: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_s8( + a: int8x8_t, + b: int8x8_t, +) -> int8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_s8( + a: int8x8_t, + b: int8x8_t, +) -> int8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_s16( + a: int16x4_t, + b: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_s16( + a: int16x4_t, + b: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_s32( + a: int32x2_t, + b: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_s32( + a: int32x2_t, + b: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_u8( + a: uint8x8_t, + b: uint8x8_t, +) -> uint8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_u8( + a: uint8x8_t, + b: uint8x8_t, +) -> uint8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_u16( + a: uint16x4_t, + b: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_u16( + a: uint16x4_t, + b: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_u32( + a: uint32x2_t, + b: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_u32( + a: uint32x2_t, + b: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_p8( + a: poly8x8_t, + b: poly8x8_t, +) -> poly8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_p8( + a: poly8x8_t, + b: poly8x8_t, +) -> poly8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_p16( + a: poly16x4_t, + b: poly16x4_t, +) -> poly16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_lane_p16( + a: poly16x4_t, + b: poly16x4_t, +) -> poly16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_f32( + a: float32x2_t, + b: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_f32( + a: float32x2_t, + b: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_s8( + a: int8x8_t, + b: int8x16_t, +) -> int8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_s8( + a: int8x8_t, + b: int8x16_t, +) -> int8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_s16( + a: int16x4_t, + b: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_s16( + a: int16x4_t, + b: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_s32( + a: int32x2_t, + b: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_s32( + a: int32x2_t, + b: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_u8( + a: uint8x8_t, + b: uint8x16_t, +) -> uint8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_u8( + a: uint8x8_t, + b: uint8x16_t, +) -> uint8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_u16( + a: uint16x4_t, + b: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_u16( + a: uint16x4_t, + b: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_u32( + a: uint32x2_t, + b: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_u32( + a: uint32x2_t, + b: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_p8( + a: poly8x8_t, + b: poly8x16_t, +) -> poly8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_p8( + a: poly8x8_t, + b: poly8x16_t, +) -> poly8x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_p16( + a: poly16x4_t, + b: poly16x8_t, +) -> poly16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopy_laneq_p16( + a: poly16x4_t, + b: poly16x8_t, +) -> poly16x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_f32( + a: float32x4_t, + b: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_f32( + a: float32x4_t, + b: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_f64( + a: float64x2_t, + b: float64x1_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_f64( + a: float64x2_t, + b: float64x1_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s64( + a: int64x2_t, + b: int64x1_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s64( + a: int64x2_t, + b: int64x1_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u64( + a: uint64x2_t, + b: uint64x1_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u64( + a: uint64x2_t, + b: uint64x1_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_p64( + a: poly64x2_t, + b: poly64x1_t, +) -> poly64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_p64( + a: poly64x2_t, + b: poly64x1_t, +) -> poly64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s8( + a: int8x16_t, + b: int8x8_t, +) -> int8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s8( + a: int8x16_t, + b: int8x8_t, +) -> int8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s16( + a: int16x8_t, + b: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s16( + a: int16x8_t, + b: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s32( + a: int32x4_t, + b: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s32( + a: int32x4_t, + b: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u8( + a: uint8x16_t, + b: uint8x8_t, +) -> uint8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u8( + a: uint8x16_t, + b: uint8x8_t, +) -> uint8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u16( + a: uint16x8_t, + b: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u16( + a: uint16x8_t, + b: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u32( + a: uint32x4_t, + b: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u32( + a: uint32x4_t, + b: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_p8( + a: poly8x16_t, + b: poly8x8_t, +) -> poly8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_p8( + a: poly8x16_t, + b: poly8x8_t, +) -> poly8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_p16( + a: poly16x8_t, + b: poly16x4_t, +) -> poly16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_p16( + a: poly16x8_t, + b: poly16x4_t, +) -> poly16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_f32( + a: float32x4_t, + b: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_f32( + a: float32x4_t, + b: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_f64( + a: float64x2_t, + b: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_f64( + a: float64x2_t, + b: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s8( + a: int8x16_t, + b: int8x16_t, +) -> int8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); + match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s8( + a: int8x16_t, + b: int8x16_t, +) -> int8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s16( + a: int16x8_t, + b: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s16( + a: int16x8_t, + b: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s32( + a: int32x4_t, + b: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s32( + a: int32x4_t, + b: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s64( + a: int64x2_t, + b: int64x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s64( + a: int64x2_t, + b: int64x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u8( + a: uint8x16_t, + b: uint8x16_t, +) -> uint8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); + match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u8( + a: uint8x16_t, + b: uint8x16_t, +) -> uint8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u64( + a: uint64x2_t, + b: uint64x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u64( + a: uint64x2_t, + b: uint64x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_p8( + a: poly8x16_t, + b: poly8x16_t, +) -> poly8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); + match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, + 11, + 12, + 13, + 14, + 15 + ] + ), + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_p8( + a: poly8x16_t, + b: poly8x16_t, +) -> poly8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 2 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 16 + LANE2 as u32, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 3 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 16 + LANE2 as u32, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 4 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 16 + LANE2 as u32, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 5 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 16 + LANE2 as u32, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 6 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 16 + LANE2 as u32, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 7 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 16 + LANE2 as u32, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 8 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 16 + LANE2 as u32, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 9 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + LANE2 as u32, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 10 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 16 + LANE2 as u32, 11, 12, 13, @@ -5975,15050 +13679,41129 @@ pub unsafe fn vcopyq_laneq_p8( 15 ] ), - 10 => simd_shuffle!( - a, + 11 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 16 + LANE2 as u32, + 12, + 13, + 14, + 15 + ] + ), + 12 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 16 + LANE2 as u32, + 13, + 14, + 15 + ] + ), + 13 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 16 + LANE2 as u32, + 14, + 15 + ] + ), + 14 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 16 + LANE2 as u32, + 15 + ] + ), + 15 => simd_shuffle!( + a, + b, + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 16 + LANE2 as u32 + ] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_p16( + a: poly16x8_t, + b: poly16x8_t, +) -> poly16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_p16( + a: poly16x8_t, + b: poly16x8_t, +) -> poly16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_p64( + a: poly64x2_t, + b: poly64x2_t, +) -> poly64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_p64( + a: poly64x2_t, + b: poly64x2_t, +) -> poly64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { + transmute(a) +} + +#[doc = "Floating-point convert to lower precision narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { + simd_cast(a) +} + +#[doc = "Floating-point convert to lower precision narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to higher precision long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { + simd_cast(a) +} + +#[doc = "Floating-point convert to higher precision long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { + simd_cast(a) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { + simd_cast(a) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { + simd_cast(a) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { + simd_cast(a) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to lower precision narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { + simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to lower precision narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x4_t = simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to higher precision long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { + let b: float32x2_t = simd_shuffle!(a, a, [2, 3]); + simd_cast(b) +} + +#[doc = "Floating-point convert to higher precision long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x2_t = simd_shuffle!(a, a, [2, 3]); + let ret_val: float64x2_t = simd_cast(b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64" + )] + fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t; + } + _vcvt_n_f64_s64(a, N) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" + )] + fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t; + } + _vcvtq_n_f64_s64(a, N) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" + )] + fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vcvtq_n_f64_s64(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64" + )] + fn _vcvt_n_f64_u64(a: int64x1_t, n: i32) -> float64x1_t; + } + _vcvt_n_f64_u64(a.as_signed(), N) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" + )] + fn _vcvtq_n_f64_u64(a: int64x2_t, n: i32) -> float64x2_t; + } + _vcvtq_n_f64_u64(a.as_signed(), N) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" + )] + fn _vcvtq_n_f64_u64(a: int64x2_t, n: i32) -> float64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vcvtq_n_f64_u64(a.as_signed(), N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64" + )] + fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t; + } + _vcvt_n_s64_f64(a, N) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" + )] + fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t; + } + _vcvtq_n_s64_f64(a, N) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" + )] + fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vcvtq_n_s64_f64(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64" + )] + fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> int64x1_t; + } + _vcvt_n_u64_f64(a, N).as_unsigned() +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" + )] + fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> int64x2_t; + } + _vcvtq_n_u64_f64(a, N).as_unsigned() +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" + )] + fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = _vcvtq_n_u64_f64(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v1i64.v1f64" + )] + fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvt_s64_f64(a) +} + +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v2i64.v2f64" + )] + fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtq_s64_f64(a) +} + +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v2i64.v2f64" + )] + fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vcvtq_s64_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v1i64.v1f64" + )] + fn _vcvt_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvt_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v2i64.v2f64" + )] + fn _vcvtq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtq_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v2i64.v2f64" + )] + fn _vcvtq_u64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = _vcvtq_u64_f64(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" + )] + fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvta_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" + )] + fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcvta_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" + )] + fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtaq_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" + )] + fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vcvtaq_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64" + )] + fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvta_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" + )] + fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtaq_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" + )] + fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vcvtaq_s64_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" + )] + fn _vcvta_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvta_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" + )] + fn _vcvta_u32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vcvta_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" + )] + fn _vcvtaq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtaq_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" + )] + fn _vcvtaq_u32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcvtaq_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64" + )] + fn _vcvta_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvta_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" + )] + fn _vcvtaq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtaq_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" + )] + fn _vcvtaq_u64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = _vcvtaq_u64_f64(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.i32.f32" + )] + fn _vcvtas_s32_f32(a: f32) -> i32; + } + _vcvtas_s32_f32(a) +} + +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtas))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.i64.f64" + )] + fn _vcvtad_s64_f64(a: f64) -> i64; + } + _vcvtad_s64_f64(a) +} + +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.i32.f32" + )] + fn _vcvtas_u32_f32(a: f32) -> i32; + } + _vcvtas_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtau))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.i64.f64" + )] + fn _vcvtad_u64_f64(a: f64) -> i64; + } + _vcvtad_u64_f64(a).as_unsigned() +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { + a as f64 +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { + a as f32 +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" + )] + fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtm_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" + )] + fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcvtm_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" + )] + fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtmq_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" + )] + fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vcvtmq_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64" + )] + fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtm_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" + )] + fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtmq_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" + )] + fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vcvtmq_s64_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" + )] + fn _vcvtm_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtm_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" + )] + fn _vcvtm_u32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vcvtm_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" + )] + fn _vcvtmq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtmq_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" + )] + fn _vcvtmq_u32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcvtmq_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64" + )] + fn _vcvtm_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtm_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" + )] + fn _vcvtmq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtmq_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" + )] + fn _vcvtmq_u64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = _vcvtmq_u64_f64(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.i32.f32" + )] + fn _vcvtms_s32_f32(a: f32) -> i32; + } + _vcvtms_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtms))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.i64.f64" + )] + fn _vcvtmd_s64_f64(a: f64) -> i64; + } + _vcvtmd_s64_f64(a) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.i32.f32" + )] + fn _vcvtms_u32_f32(a: f32) -> i32; + } + _vcvtms_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtmu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.i64.f64" + )] + fn _vcvtmd_u64_f64(a: f64) -> i64; + } + _vcvtmd_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" + )] + fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtn_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" + )] + fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcvtn_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" + )] + fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtnq_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" + )] + fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vcvtnq_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64" + )] + fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtn_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" + )] + fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtnq_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" + )] + fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vcvtnq_s64_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" + )] + fn _vcvtn_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtn_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" + )] + fn _vcvtn_u32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vcvtn_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" + )] + fn _vcvtnq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtnq_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" + )] + fn _vcvtnq_u32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcvtnq_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64" + )] + fn _vcvtn_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtn_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" + )] + fn _vcvtnq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtnq_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" + )] + fn _vcvtnq_u64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = _vcvtnq_u64_f64(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.i32.f32" + )] + fn _vcvtns_s32_f32(a: f32) -> i32; + } + _vcvtns_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtns))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.i64.f64" + )] + fn _vcvtnd_s64_f64(a: f64) -> i64; + } + _vcvtnd_s64_f64(a) +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.i32.f32" + )] + fn _vcvtns_u32_f32(a: f32) -> i32; + } + _vcvtns_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtnu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.i64.f64" + )] + fn _vcvtnd_u64_f64(a: f64) -> i64; + } + _vcvtnd_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" + )] + fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtp_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" + )] + fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcvtp_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" + )] + fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtpq_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" + )] + fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vcvtpq_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64" + )] + fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtp_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" + )] + fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtpq_s64_f64(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" + )] + fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vcvtpq_s64_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" + )] + fn _vcvtp_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtp_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" + )] + fn _vcvtp_u32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vcvtp_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" + )] + fn _vcvtpq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtpq_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" + )] + fn _vcvtpq_u32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcvtpq_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64" + )] + fn _vcvtp_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtp_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" + )] + fn _vcvtpq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtpq_u64_f64(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" + )] + fn _vcvtpq_u64_f64(a: float64x2_t) -> int64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = _vcvtpq_u64_f64(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.i32.f32" + )] + fn _vcvtps_s32_f32(a: f32) -> i32; + } + _vcvtps_s32_f32(a) +} + +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.i64.f64" + )] + fn _vcvtpd_s64_f64(a: f64) -> i64; + } + _vcvtpd_s64_f64(a) +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.i32.f32" + )] + fn _vcvtps_u32_f32(a: f32) -> i32; + } + _vcvtps_u32_f32(a).as_unsigned() +} + +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtpu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.i64.f64" + )] + fn _vcvtpd_u64_f64(a: f64) -> i64; + } + _vcvtpd_u64_f64(a).as_unsigned() +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { + a as f32 +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { + a as f64 +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32" + )] + fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32; + } + _vcvts_n_f32_s32(a, N) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64" + )] + fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64; + } + _vcvtd_n_f64_s64(a, N) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32" + )] + fn _vcvts_n_f32_u32(a: i32, n: i32) -> f32; + } + _vcvts_n_f32_u32(a.as_signed(), N) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64" + )] + fn _vcvtd_n_f64_u64(a: i64, n: i32) -> f64; + } + _vcvtd_n_f64_u64(a.as_signed(), N) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32" + )] + fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32; + } + _vcvts_n_s32_f32(a, N) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64" + )] + fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64; + } + _vcvtd_n_s64_f64(a, N) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32" + )] + fn _vcvts_n_u32_f32(a: f32, n: i32) -> i32; + } + _vcvts_n_u32_f32(a, N).as_unsigned() +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64" + )] + fn _vcvtd_n_u64_f64(a: f64, n: i32) -> i64; + } + _vcvtd_n_u64_f64(a, N).as_unsigned() +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { + a as i32 +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { + a as i64 +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { + a as u32 +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzu))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { + a as u64 +} + +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" + )] + fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t; + } + _vcvtx_f32_f64(a) +} + +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" + )] + fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vcvtx_f32_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { + simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x4_t = simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtxn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { + simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) +} + +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_div(a, b) +} + +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_div(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_div(a, b) +} + +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_div(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + simd_div(a, b) +} + +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_div(a, b) +} + +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_div(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(sdot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdot_laneq_s32( + a: int32x2_t, + b: int8x8_t, + c: int8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_s32(a, b, transmute(c)) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(sdot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdot_laneq_s32( + a: int32x2_t, + b: int8x8_t, + c: int8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int32x4_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(sdot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdotq_laneq_s32( + a: int32x4_t, + b: int8x16_t, + c: int8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_s32(a, b, transmute(c)) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(sdot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdotq_laneq_s32( + a: int32x4_t, + b: int8x16_t, + c: int8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int32x4_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vdotq_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(udot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdot_laneq_u32( + a: uint32x2_t, + b: uint8x8_t, + c: uint8x16_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_u32(a, b, transmute(c)) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(udot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdot_laneq_u32( + a: uint32x2_t, + b: uint8x8_t, + c: uint8x16_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint32x4_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(udot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdotq_laneq_u32( + a: uint32x4_t, + b: uint8x16_t, + c: uint8x16_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_u32(a, b, transmute(c)) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(udot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdotq_laneq_u32( + a: uint32x4_t, + b: uint8x16_t, + c: uint8x16_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint32x4_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: uint32x4_t = vdotq_u32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { + static_assert!(N == 0); + a +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { + static_assert!(N == 0); + a +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(N, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + transmute::(simd_extract!(a, N as u32)) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { + static_assert_uimm_bits!(N, 1); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + transmute::(simd_extract!(a, N as u32)) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { + static_assert_uimm_bits!(N, 3); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { + static_assert_uimm_bits!(N, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { + static_assert_uimm_bits!(N, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { + static_assert_uimm_bits!(N, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { + static_assert_uimm_bits!(N, 3); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { + static_assert_uimm_bits!(N, 3); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { + static_assert_uimm_bits!(N, 4); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { + static_assert_uimm_bits!(N, 4); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { + static_assert_uimm_bits!(N, 4); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { + static_assert_uimm_bits!(N, 4); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { + static_assert_uimm_bits!(N, 4); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { + static_assert_uimm_bits!(N, 4); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { + static_assert!(N == 0); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { + static_assert!(N == 0); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_lane_u64(a: uint64x1_t) -> u64 { + static_assert!(N == 0); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { + static_assert!(N == 0); + let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 0))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { + static_assert!(N == 0); + let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(N, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(dup, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(N, 1); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { + static_assert_uimm_bits!(N, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { + static_assert_uimm_bits!(N, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { + static_assert_uimm_bits!(N, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { + static_assert_uimm_bits!(N, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { + static_assert_uimm_bits!(N, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { + static_assert_uimm_bits!(N, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { + static_assert_uimm_bits!(N, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { + static_assert_uimm_bits!(N, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { + static_assert_uimm_bits!(N, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { + static_assert_uimm_bits!(N, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) +} + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { + static_assert_uimm_bits!(N, 2); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_extract!(a, N as u32) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v16i8" + )] + fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _veor3q_s8(a, b, c) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v16i8" + )] + fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _veor3q_s8(a, b, c); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v8i16" + )] + fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _veor3q_s16(a, b, c) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v8i16" + )] + fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _veor3q_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v4i32" + )] + fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _veor3q_s32(a, b, c) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v4i32" + )] + fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = _veor3q_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v2i64" + )] + fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _veor3q_s64(a, b, c) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v2i64" + )] + fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = _veor3q_s64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v16i8" + )] + fn _veor3q_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v16i8" + )] + fn _veor3q_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v8i16" + )] + fn _veor3q_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v8i16" + )] + fn _veor3q_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = + _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v4i32" + )] + fn _veor3q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v4i32" + )] + fn _veor3q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v2i64" + )] + fn _veor3q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +} + +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v2i64" + )] + fn _veor3q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = + _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ext, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ext, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + static_assert_uimm_bits!(N, 1); + let ret_val: float64x2_t = match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ext, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } +} + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ext, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + static_assert_uimm_bits!(N, 1); + let ret_val: poly64x2_t = match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmadd))] +pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.v1f64" + )] + fn _vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; + } + _vfma_f64(b, c, a) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_laneq_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_laneq_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, +) -> float64x1_t { + static_assert!(LANE == 0); + vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_laneq_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, +) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_laneq_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, +) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmadd))] +pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { + vfma_f64(a, b, vdup_n_f64(c)) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f64" + )] + fn _vfmad_lane_f64(a: f64, b: f64, c: f64) -> f64; + } + static_assert!(LANE == 0); + let c: f64 = simd_extract!(c, LANE as u32); + _vfmad_lane_f64(b, c, a) +} + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmla))] +pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.v2f64" + )] + fn _vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + _vfmaq_f64(b, c, a) +} + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmla))] +pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.v2f64" + )] + fn _vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = _vfmaq_f64(b, c, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, +) -> float64x2_t { + static_assert!(LANE == 0); + vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, +) -> float64x2_t { + static_assert!(LANE == 0); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmla))] +pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + vfmaq_f64(a, b, vdupq_n_f64(c)) +} + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmla))] +pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f32" + )] + fn _vfmas_lane_f32(a: f32, b: f32, c: f32) -> f32; + } + static_assert_uimm_bits!(LANE, 1); + let c: f32 = simd_extract!(c, LANE as u32); + _vfmas_lane_f32(b, c, a) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f32" + )] + fn _vfmas_lane_f32(a: f32, b: f32, c: f32) -> f32; + } + static_assert_uimm_bits!(LANE, 1); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: f32 = simd_extract!(c, LANE as u32); + _vfmas_lane_f32(b, c, a) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f32" + )] + fn _vfmas_laneq_f32(a: f32, b: f32, c: f32) -> f32; + } + static_assert_uimm_bits!(LANE, 2); + let c: f32 = simd_extract!(c, LANE as u32); + _vfmas_laneq_f32(b, c, a) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f32" + )] + fn _vfmas_laneq_f32(a: f32, b: f32, c: f32) -> f32; + } + static_assert_uimm_bits!(LANE, 2); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: f32 = simd_extract!(c, LANE as u32); + _vfmas_laneq_f32(b, c, a) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f64" + )] + fn _vfmad_laneq_f64(a: f64, b: f64, c: f64) -> f64; + } + static_assert_uimm_bits!(LANE, 1); + let c: f64 = simd_extract!(c, LANE as u32); + _vfmad_laneq_f64(b, c, a) +} + +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f64" + )] + fn _vfmad_laneq_f64(a: f64, b: f64, c: f64) -> f64; + } + static_assert_uimm_bits!(LANE, 1); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let c: f64 = simd_extract!(c, LANE as u32); + _vfmad_laneq_f64(b, c, a) +} + +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + let b: float64x1_t = simd_neg(b); + vfma_f64(a, b, c) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_laneq_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_laneq_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, +) -> float64x1_t { + static_assert!(LANE == 0); + vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_laneq_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, +) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_laneq_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, +) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { + vfms_f64(a, b, vdup_n_f64(c)) +} + +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + let b: float64x2_t = simd_neg(b); + vfmaq_f64(a, b, c) +} + +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let b: float64x2_t = simd_neg(b); + let ret_val: float64x2_t = vfmaq_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, +) -> float64x2_t { + static_assert!(LANE == 0); + vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, +) -> float64x2_t { + static_assert!(LANE == 0); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + vfmsq_f64(a, b, vdupq_n_f64(c)) +} + +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { + vfmas_lane_f32::(a, -b, c) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + vfmas_lane_f32::(a, -b, c) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { + vfmas_laneq_f32::(a, -b, c) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vfmas_laneq_f32::(a, -b, c) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { + vfmad_lane_f64::(a, -b, c) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { + vfmad_laneq_f64::(a, -b, c) +} + +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + vfmad_laneq_f64::(a, -b, c) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + let ret_val: float32x2_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { + let ret_val: float32x4_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { + let ret_val: float64x2_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { + let ret_val: int8x8_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { + let ret_val: int8x16_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { + let ret_val: int16x4_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { + let ret_val: int16x8_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { + let ret_val: int32x2_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { + let ret_val: int32x4_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { + let ret_val: int64x2_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { + let ret_val: uint8x8_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { + let ret_val: uint8x16_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { + let ret_val: uint16x4_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { + let ret_val: uint16x8_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { + let ret_val: uint32x2_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { + let ret_val: uint32x4_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + let ret_val: uint64x2_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + let ret_val: poly8x8_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + let ret_val: poly8x16_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + let ret_val: poly16x4_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { + let ret_val: poly16x8_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { + let ret_val: poly64x2_t = crate::ptr::read_unaligned(ptr.cast()); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64" + )] + fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t; + } + _vld1_f64_x2(a) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64" + )] + fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t; + } + _vld1_f64_x3(a) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64" + )] + fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t; + } + _vld1_f64_x4(a) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" + )] + fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t; + } + _vld1q_f64_x2(a) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" + )] + fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t; + } + let mut ret_val: float64x2x2_t = _vld1q_f64_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" + )] + fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t; + } + _vld1q_f64_x3(a) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" + )] + fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t; + } + let mut ret_val: float64x2x3_t = _vld1q_f64_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" + )] + fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t; + } + _vld1q_f64_x4(a) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" + )] + fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t; + } + let mut ret_val: float64x2x4_t = _vld1q_f64_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64" + )] + fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t; + } + _vld2_dup_f64(a as _) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" + )] + fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t; + } + _vld2q_dup_f64(a as _) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" + )] + fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t; + } + let mut ret_val: float64x2x2_t = _vld2q_dup_f64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" + )] + fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t; + } + _vld2q_dup_s64(a as _) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" + )] + fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t; + } + let mut ret_val: int64x2x2_t = _vld2q_dup_s64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64" + )] + fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t; + } + _vld2_f64(a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8" + )] + fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t; + } + _vld2_lane_f64(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8" + )] + fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t; + } + _vld2_lane_s64(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t { + static_assert!(LANE == 0); + transmute(vld2_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t { + static_assert!(LANE == 0); + transmute(vld2_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { + transmute(vld2q_dup_s64(transmute(a))) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { + let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { + transmute(vld2q_dup_s64(transmute(a))) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { + let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" + )] + fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t; + } + _vld2q_f64(a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" + )] + fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t; + } + let mut ret_val: float64x2x2_t = _vld2q_f64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" + )] + fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t; + } + _vld2q_s64(a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" + )] + fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t; + } + let mut ret_val: int64x2x2_t = _vld2q_s64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" + )] + fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) + -> float64x2x2_t; + } + _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" + )] + fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) + -> float64x2x2_t; + } + let mut b: float64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: float64x2x2_t = _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" + )] + fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t; + } + _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" + )] + fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t; + } + let mut b: int8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: int8x16x2_t = _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" + )] + fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t; + } + _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" + )] + fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t; + } + let mut b: int64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: int64x2x2_t = _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld2q_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: poly64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: poly64x2x2_t = transmute(vld2q_lane_s64::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld2q_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + let mut b: uint8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: uint8x16x2_t = transmute(vld2q_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld2q_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: uint64x2x2_t = transmute(vld2q_lane_s64::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld2q_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + let mut b: poly8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: poly8x16x2_t = transmute(vld2q_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { + transmute(vld2q_s64(transmute(a))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { + let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { + transmute(vld2q_s64(transmute(a))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { + let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64" + )] + fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t; + } + _vld3_dup_f64(a as _) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" + )] + fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t; + } + _vld3q_dup_f64(a as _) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" + )] + fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t; + } + let mut ret_val: float64x2x3_t = _vld3q_dup_f64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" + )] + fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t; + } + _vld3q_dup_s64(a as _) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" + )] + fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t; + } + let mut ret_val: int64x2x3_t = _vld3q_dup_s64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64" + )] + fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t; + } + _vld3_f64(a as _) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8" + )] + fn _vld3_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + n: i64, + ptr: *const i8, + ) -> float64x1x3_t; + } + _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t { + static_assert!(LANE == 0); + transmute(vld3_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8" + )] + fn _vld3_lane_s64( + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + n: i64, + ptr: *const i8, + ) -> int64x1x3_t; + } + _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t { + static_assert!(LANE == 0); + transmute(vld3_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { + transmute(vld3q_dup_s64(transmute(a))) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { + let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { + transmute(vld3q_dup_s64(transmute(a))) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { + let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" + )] + fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t; + } + _vld3q_f64(a as _) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" + )] + fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t; + } + let mut ret_val: float64x2x3_t = _vld3q_f64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" + )] + fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t; + } + _vld3q_s64(a as _) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" + )] + fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t; + } + let mut ret_val: int64x2x3_t = _vld3q_s64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" + )] + fn _vld3q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + n: i64, + ptr: *const i8, + ) -> float64x2x3_t; + } + _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" + )] + fn _vld3q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + n: i64, + ptr: *const i8, + ) -> float64x2x3_t; + } + let mut b: float64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: float64x2x3_t = _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3q_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: poly64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: poly64x2x3_t = transmute(vld3q_lane_s64::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" + )] + fn _vld3q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + n: i64, + ptr: *const i8, + ) -> int8x16x3_t; + } + _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" + )] + fn _vld3q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + n: i64, + ptr: *const i8, + ) -> int8x16x3_t; + } + let mut b: int8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: int8x16x3_t = _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" + )] + fn _vld3q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + n: i64, + ptr: *const i8, + ) -> int64x2x3_t; + } + _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" + )] + fn _vld3q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + n: i64, + ptr: *const i8, + ) -> int64x2x3_t; + } + let mut b: int64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: int64x2x3_t = _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld3q_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { + static_assert_uimm_bits!(LANE, 4); + let mut b: uint8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: uint8x16x3_t = transmute(vld3q_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3q_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: uint64x2x3_t = transmute(vld3q_lane_s64::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld3q_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { + static_assert_uimm_bits!(LANE, 4); + let mut b: poly8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: poly8x16x3_t = transmute(vld3q_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { + transmute(vld3q_s64(transmute(a))) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { + let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { + transmute(vld3q_s64(transmute(a))) +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { + let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64" + )] + fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t; + } + _vld4_dup_f64(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" + )] + fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t; + } + _vld4q_dup_f64(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" + )] + fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t; + } + let mut ret_val: float64x2x4_t = _vld4q_dup_f64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" + )] + fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t; + } + _vld4q_dup_s64(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" + )] + fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t; + } + let mut ret_val: int64x2x4_t = _vld4q_dup_s64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64" + )] + fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t; + } + _vld4_f64(a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8" + )] + fn _vld4_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + d: float64x1_t, + n: i64, + ptr: *const i8, + ) -> float64x1x4_t; + } + _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8" + )] + fn _vld4_lane_s64( + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + d: int64x1_t, + n: i64, + ptr: *const i8, + ) -> int64x1x4_t; + } + _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t { + static_assert!(LANE == 0); + transmute(vld4_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t { + static_assert!(LANE == 0); + transmute(vld4_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { + transmute(vld4q_dup_s64(transmute(a))) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { + let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { + transmute(vld4q_dup_s64(transmute(a))) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { + let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" + )] + fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t; + } + _vld4q_f64(a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" + )] + fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t; + } + let mut ret_val: float64x2x4_t = _vld4q_f64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" + )] + fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t; + } + _vld4q_s64(a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" + )] + fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t; + } + let mut ret_val: int64x2x4_t = _vld4q_s64(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" + )] + fn _vld4q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + n: i64, + ptr: *const i8, + ) -> float64x2x4_t; + } + _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" + )] + fn _vld4q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + n: i64, + ptr: *const i8, + ) -> float64x2x4_t; + } + let mut b: float64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: float64x2x4_t = _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" + )] + fn _vld4q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + n: i64, + ptr: *const i8, + ) -> int8x16x4_t; + } + _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" + )] + fn _vld4q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + n: i64, + ptr: *const i8, + ) -> int8x16x4_t; + } + let mut b: int8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: int8x16x4_t = _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" + )] + fn _vld4q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + d: int64x2_t, + n: i64, + ptr: *const i8, + ) -> int64x2x4_t; + } + _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" + )] + fn _vld4q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + d: int64x2_t, + n: i64, + ptr: *const i8, + ) -> int64x2x4_t; + } + let mut b: int64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: int64x2x4_t = _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4q_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: poly64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: poly64x2x4_t = transmute(vld4q_lane_s64::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld4q_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { + static_assert_uimm_bits!(LANE, 4); + let mut b: uint8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: uint8x16x4_t = transmute(vld4q_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4q_lane_s64::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: uint64x2x4_t = transmute(vld4q_lane_s64::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld4q_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { + static_assert_uimm_bits!(LANE, 4); + let mut b: poly8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let mut ret_val: poly8x16x4_t = transmute(vld4q_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { + transmute(vld4q_s64(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { + let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { + transmute(vld4q_s64(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { + let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmax))] +pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v1f64" + )] + fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vmax_f64(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmax))] +pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v2f64" + )] + fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vmaxq_f64(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmax))] +pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v2f64" + )] + fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vmaxq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v1f64" + )] + fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vmaxnm_f64(a, b) +} + +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v2f64" + )] + fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vmaxnmq_f64(a, b) +} + +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v2f64" + )] + fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vmaxnmq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" + )] + fn _vmaxnmv_f32(a: float32x2_t) -> f32; + } + _vmaxnmv_f32(a) +} + +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" + )] + fn _vmaxnmv_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vmaxnmv_f32(a) +} + +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" + )] + fn _vmaxnmvq_f64(a: float64x2_t) -> f64; + } + _vmaxnmvq_f64(a) +} + +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" + )] + fn _vmaxnmvq_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vmaxnmvq_f64(a) +} + +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" + )] + fn _vmaxnmvq_f32(a: float32x4_t) -> f32; + } + _vmaxnmvq_f32(a) +} + +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" + )] + fn _vmaxnmvq_f32(a: float32x4_t) -> f32; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vmaxnmvq_f32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" + )] + fn _vmaxv_f32(a: float32x2_t) -> f32; + } + _vmaxv_f32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" + )] + fn _vmaxv_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vmaxv_f32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32" + )] + fn _vmaxvq_f32(a: float32x4_t) -> f32; + } + _vmaxvq_f32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32" + )] + fn _vmaxvq_f32(a: float32x4_t) -> f32; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vmaxvq_f32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" + )] + fn _vmaxvq_f64(a: float64x2_t) -> f64; + } + _vmaxvq_f64(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" + )] + fn _vmaxvq_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vmaxvq_f64(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" + )] + fn _vmaxv_s8(a: int8x8_t) -> i8; + } + _vmaxv_s8(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" + )] + fn _vmaxv_s8(a: int8x8_t) -> i8; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vmaxv_s8(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" + )] + fn _vmaxvq_s8(a: int8x16_t) -> i8; + } + _vmaxvq_s8(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" + )] + fn _vmaxvq_s8(a: int8x16_t) -> i8; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vmaxvq_s8(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" + )] + fn _vmaxv_s16(a: int16x4_t) -> i16; + } + _vmaxv_s16(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" + )] + fn _vmaxv_s16(a: int16x4_t) -> i16; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vmaxv_s16(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" + )] + fn _vmaxvq_s16(a: int16x8_t) -> i16; + } + _vmaxvq_s16(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" + )] + fn _vmaxvq_s16(a: int16x8_t) -> i16; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vmaxvq_s16(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" + )] + fn _vmaxv_s32(a: int32x2_t) -> i32; + } + _vmaxv_s32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" + )] + fn _vmaxv_s32(a: int32x2_t) -> i32; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + _vmaxv_s32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" + )] + fn _vmaxvq_s32(a: int32x4_t) -> i32; + } + _vmaxvq_s32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" + )] + fn _vmaxvq_s32(a: int32x4_t) -> i32; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vmaxvq_s32(a) +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" + )] + fn _vmaxv_u8(a: int8x8_t) -> i8; + } + _vmaxv_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" + )] + fn _vmaxv_u8(a: int8x8_t) -> i8; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vmaxv_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" + )] + fn _vmaxvq_u8(a: int8x16_t) -> i8; + } + _vmaxvq_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" + )] + fn _vmaxvq_u8(a: int8x16_t) -> i8; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vmaxvq_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" + )] + fn _vmaxv_u16(a: int16x4_t) -> i16; + } + _vmaxv_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" + )] + fn _vmaxv_u16(a: int16x4_t) -> i16; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vmaxv_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" + )] + fn _vmaxvq_u16(a: int16x8_t) -> i16; + } + _vmaxvq_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" + )] + fn _vmaxvq_u16(a: int16x8_t) -> i16; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vmaxvq_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" + )] + fn _vmaxv_u32(a: int32x2_t) -> i32; + } + _vmaxv_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" + )] + fn _vmaxv_u32(a: int32x2_t) -> i32; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + _vmaxv_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" + )] + fn _vmaxvq_u32(a: int32x4_t) -> i32; + } + _vmaxvq_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" + )] + fn _vmaxvq_u32(a: int32x4_t) -> i32; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vmaxvq_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmin))] +pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v1f64" + )] + fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vmin_f64(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmin))] +pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v2f64" + )] + fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vminq_f64(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmin))] +pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v2f64" + )] + fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vminq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminnm))] +pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v1f64" + )] + fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vminnm_f64(a, b) +} + +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminnm))] +pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v2f64" + )] + fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vminnmq_f64(a, b) +} + +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminnm))] +pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v2f64" + )] + fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vminnmq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" + )] + fn _vminnmv_f32(a: float32x2_t) -> f32; + } + _vminnmv_f32(a) +} + +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" + )] + fn _vminnmv_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vminnmv_f32(a) +} + +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" + )] + fn _vminnmvq_f64(a: float64x2_t) -> f64; + } + _vminnmvq_f64(a) +} + +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" + )] + fn _vminnmvq_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vminnmvq_f64(a) +} + +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" + )] + fn _vminnmvq_f32(a: float32x4_t) -> f32; + } + _vminnmvq_f32(a) +} + +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmv))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" + )] + fn _vminnmvq_f32(a: float32x4_t) -> f32; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vminnmvq_f32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v2f32" + )] + fn _vminv_f32(a: float32x2_t) -> f32; + } + _vminv_f32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v2f32" + )] + fn _vminv_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vminv_f32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminv))] +pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v4f32" + )] + fn _vminvq_f32(a: float32x4_t) -> f32; + } + _vminvq_f32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminv))] +pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v4f32" + )] + fn _vminvq_f32(a: float32x4_t) -> f32; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vminvq_f32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f64.v2f64" + )] + fn _vminvq_f64(a: float64x2_t) -> f64; + } + _vminvq_f64(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f64.v2f64" + )] + fn _vminvq_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vminvq_f64(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i8.v8i8" + )] + fn _vminv_s8(a: int8x8_t) -> i8; + } + _vminv_s8(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i8.v8i8" + )] + fn _vminv_s8(a: int8x8_t) -> i8; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vminv_s8(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i8.v16i8" + )] + fn _vminvq_s8(a: int8x16_t) -> i8; + } + _vminvq_s8(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i8.v16i8" + )] + fn _vminvq_s8(a: int8x16_t) -> i8; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vminvq_s8(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i16.v4i16" + )] + fn _vminv_s16(a: int16x4_t) -> i16; + } + _vminv_s16(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i16.v4i16" + )] + fn _vminv_s16(a: int16x4_t) -> i16; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vminv_s16(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i16.v8i16" + )] + fn _vminvq_s16(a: int16x8_t) -> i16; + } + _vminvq_s16(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i16.v8i16" + )] + fn _vminvq_s16(a: int16x8_t) -> i16; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vminvq_s16(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i32.v2i32" + )] + fn _vminv_s32(a: int32x2_t) -> i32; + } + _vminv_s32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i32.v2i32" + )] + fn _vminv_s32(a: int32x2_t) -> i32; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + _vminv_s32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i32.v4i32" + )] + fn _vminvq_s32(a: int32x4_t) -> i32; + } + _vminvq_s32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i32.v4i32" + )] + fn _vminvq_s32(a: int32x4_t) -> i32; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vminvq_s32(a) +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i8.v8i8" + )] + fn _vminv_u8(a: int8x8_t) -> i8; + } + _vminv_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i8.v8i8" + )] + fn _vminv_u8(a: int8x8_t) -> i8; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vminv_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i8.v16i8" + )] + fn _vminvq_u8(a: int8x16_t) -> i8; + } + _vminvq_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i8.v16i8" + )] + fn _vminvq_u8(a: int8x16_t) -> i8; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + _vminvq_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i16.v4i16" + )] + fn _vminv_u16(a: int16x4_t) -> i16; + } + _vminv_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i16.v4i16" + )] + fn _vminv_u16(a: int16x4_t) -> i16; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vminv_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i16.v8i16" + )] + fn _vminvq_u16(a: int16x8_t) -> i16; + } + _vminvq_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i16.v8i16" + )] + fn _vminvq_u16(a: int16x8_t) -> i16; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + _vminvq_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i32.v2i32" + )] + fn _vminv_u32(a: int32x2_t) -> i32; + } + _vminv_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i32.v2i32" + )] + fn _vminv_u32(a: int32x2_t) -> i32; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + _vminv_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i32.v4i32" + )] + fn _vminvq_u32(a: int32x4_t) -> i32; + } + _vminvq_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i32.v4i32" + )] + fn _vminvq_u32(a: int32x4_t) -> i32; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + _vminvq_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlal_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmlal_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vmlal_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmlal_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlal_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlal_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = vmlal_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_laneq_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmlal_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vmlal_high_s16(a, b, vdupq_n_s16(c)) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmlal_high_s16(a, b, vdupq_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vmlal_high_s32(a, b, vdupq_n_s32(c)) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmlal_high_s32(a, b, vdupq_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + vmlal_high_u16(a, b, vdupq_n_u16(c)) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlal_high_u16(a, b, vdupq_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + vmlal_high_u32(a, b, vdupq_n_u32(c)) +} + +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmlal_high_u32(a, b, vdupq_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlal_s8(a, b, c) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = vmlal_s8(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlal_s16(a, b, c) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + let ret_val: int32x4_t = vmlal_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlal_s32(a, b, c) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); + let ret_val: int64x2_t = vmlal_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlal_u8(a, b, c) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = vmlal_u8(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlal_u16(a, b, c) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlal_u16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlal_u32(a, b, c) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); + let ret_val: uint64x2_t = vmlal_u32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float64x2_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlsl_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmlsl_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vmlsl_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmlsl_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlsl_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlsl_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_lane_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = vmlsl_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_laneq_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmlsl_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vmlsl_high_s16(a, b, vdupq_n_s16(c)) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmlsl_high_s16(a, b, vdupq_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vmlsl_high_s32(a, b, vdupq_n_s32(c)) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmlsl_high_s32(a, b, vdupq_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + vmlsl_high_u16(a, b, vdupq_n_u16(c)) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlsl_high_u16(a, b, vdupq_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + vmlsl_high_u32(a, b, vdupq_n_u32(c)) +} + +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmlsl_high_u32(a, b, vdupq_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlsl_s8(a, b, c) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = vmlsl_s8(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlsl_s16(a, b, c) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + let ret_val: int32x4_t = vmlsl_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlsl_s32(a, b, c) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); + let ret_val: int64x2_t = vmlsl_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlsl_u8(a, b, c) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = vmlsl_u8(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlsl_u16(a, b, c) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlsl_u16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlsl_u32(a, b, c) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); + let ret_val: uint64x2_t = vmlsl_u32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vmovl_s8(a) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = vmovl_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vmovl_s16(a) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let ret_val: int32x4_t = vmovl_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + vmovl_s32(a) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let ret_val: int64x2_t = vmovl_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vmovl_u8(a) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = vmovl_u8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vmovl_u16(a) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let ret_val: uint32x4_t = vmovl_u16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + vmovl_u32(a) +} + +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let ret_val: uint64x2_t = vmovl_u32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + let c: int8x8_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_cast(b); + let ret_val: int8x16_t = + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + let c: int16x4_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_cast(b); + let ret_val: int16x8_t = simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + let c: int32x2_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_cast(b); + let ret_val: int32x4_t = simd_shuffle!(a, c, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + let c: uint8x8_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_cast(b); + let ret_val: uint8x16_t = + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + let c: uint16x4_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_cast(b); + let ret_val: uint16x8_t = simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + let c: uint32x2_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3]) +} + +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_cast(b); + let ret_val: uint32x4_t = simd_shuffle!(a, c, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmul))] +pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmul))] +pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmul))] +pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + static_assert!(LANE == 0); + simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { + simd_mul(a, vdup_n_f64(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { + simd_mul(a, vdupq_n_f64(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_mul(a, vdupq_n_f64(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { + static_assert!(LANE == 0); + let b: f64 = simd_extract!(b, LANE as u32); + a * b +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_s16( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmull_high_s16( + a, + simd_shuffle!( + b, b, [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 ] ), - 11 => simd_shuffle!( - a, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_high_s16( + a, + simd_shuffle!( + b, b, [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmull_high_s16( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 ] ), - 12 => simd_shuffle!( - a, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_high_s32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vmull_high_s32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_s32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmull_high_s32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_u16( + a, + simd_shuffle!( + b, b, [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 ] ), - 13 => simd_shuffle!( - a, + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmull_high_u16( + a, + simd_shuffle!( + b, b, [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 ] ), - 14 => simd_shuffle!( - a, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_high_u16( + a, + simd_shuffle!( + b, b, [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 ] ), - 15 => simd_shuffle!( - a, + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmull_high_u16( + a, + simd_shuffle!( + b, b, [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 ] ), - _ => unreachable_unchecked(), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_high_u32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = vmull_high_u32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_u32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmull_high_u32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { + vmull_high_s16(a, vdupq_n_s16(b)) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmull_high_s16(a, vdupq_n_s16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { + vmull_high_s32(a, vdupq_n_s32(b)) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmull_high_s32(a, vdupq_n_s32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { + vmull_high_u16(a, vdupq_n_u16(b)) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmull_high_u16(a, vdupq_n_u16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { + vmull_high_u32(a, vdupq_n_u32(b)) +} + +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(umull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmull_high_u32(a, vdupq_n_u32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { + vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) +} + +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) +} + +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + vmull_p8(a, b) +} + +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly16x8_t = vmull_p8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + vmull_s8(a, b) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = vmull_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + vmull_s16(a, b) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let ret_val: int32x4_t = vmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + vmull_s32(a, b) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let ret_val: int64x2_t = vmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + vmull_u8(a, b) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = vmull_u8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + vmull_u16(a, b) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let ret_val: uint32x4_t = vmull_u16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + vmull_u32(a, b) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let ret_val: uint64x2_t = vmull_u32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmull64" + )] + fn _vmull_p64(a: p64, b: p64) -> int8x16_t; + } + transmute(_vmull_p64(a, b)) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { + static_assert!(LANE == 0); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { + static_assert!(LANE == 0); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { + static_assert_uimm_bits!(LANE, 1); + let b: f32 = simd_extract!(b, LANE as u32); + a * b +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { + static_assert_uimm_bits!(LANE, 1); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: f32 = simd_extract!(b, LANE as u32); + a * b +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { + static_assert_uimm_bits!(LANE, 2); + let b: f32 = simd_extract!(b, LANE as u32); + a * b +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { + static_assert_uimm_bits!(LANE, 2); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: f32 = simd_extract!(b, LANE as u32); + a * b +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { + static_assert_uimm_bits!(LANE, 1); + let b: f64 = simd_extract!(b, LANE as u32); + a * b +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { + static_assert_uimm_bits!(LANE, 1); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let b: f64 = simd_extract!(b, LANE as u32); + a * b +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.v2f32" + )] + fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vmulx_f32(a, b) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.v2f32" + )] + fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vmulx_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.v4f32" + )] + fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vmulxq_f32(a, b) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.v4f32" + )] + fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vmulxq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.v1f64" + )] + fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vmulx_f64(a, b) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.v2f64" + )] + fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vmulxq_f64(a, b) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.v2f64" + )] + fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vmulxq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x2_t = vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmulxq_f32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x4_t = vmulxq_f32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmulxq_f32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = vmulxq_f32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + static_assert!(LANE == 0); + vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.f64" + )] + fn _vmulxd_f64(a: f64, b: f64) -> f64; + } + _vmulxd_f64(a, b) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.f32" + )] + fn _vmulxs_f32(a: f32, b: f32) -> f32; + } + _vmulxs_f32(a, b) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { + static_assert!(LANE == 0); + vmulxd_f64(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { + static_assert_uimm_bits!(LANE, 1); + vmulxd_f64(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { + static_assert_uimm_bits!(LANE, 1); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + vmulxd_f64(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { + static_assert_uimm_bits!(LANE, 1); + vmulxs_f32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { + static_assert_uimm_bits!(LANE, 1); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + vmulxs_f32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { + static_assert_uimm_bits!(LANE, 2); + vmulxs_f32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { + static_assert_uimm_bits!(LANE, 2); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + vmulxs_f32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { + static_assert!(LANE == 0); + vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { + static_assert!(LANE == 0); + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fneg))] +pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fneg))] +pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fneg))] +pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(neg))] +pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(neg))] +pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(neg))] +pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(neg))] +pub unsafe fn vnegd_s64(a: i64) -> i64 { + a.wrapping_neg() +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { + let a1: f64 = simd_extract!(a, 0); + let a2: f64 = simd_extract!(a, 1); + a1 + a2 +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let a1: f64 = simd_extract!(a, 0); + let a2: f64 = simd_extract!(a, 1); + a1 + a2 +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { + let a1: f32 = simd_extract!(a, 0); + let a2: f32 = simd_extract!(a, 1); + a1 + a2 +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let a1: f32 = simd_extract!(a, 0); + let a2: f32 = simd_extract!(a, 1); + a1 + a2 +} + +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { + transmute(vaddvq_u64(transmute(a))) +} + +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(vaddvq_u64(transmute(a))) +} + +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { + vaddvq_u64(a) +} + +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + vaddvq_u64(a) +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v4f32" + )] + fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpaddq_f32(a, b) +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v4f32" + )] + fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vpaddq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f64" + )] + fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpaddq_f64(a, b) +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f64" + )] + fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vpaddq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v16i8" + )] + fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpaddq_s8(a, b) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v16i8" + )] + fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vpaddq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i16" + )] + fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpaddq_s16(a, b) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i16" + )] + fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vpaddq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v4i32" + )] + fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpaddq_s32(a, b) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v4i32" + )] + fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vpaddq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i64" + )] + fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vpaddq_s64(a, b) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i64" + )] + fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vpaddq_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + transmute(vpaddq_s8(transmute(a), transmute(b))) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + transmute(vpaddq_s16(transmute(a), transmute(b))) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + transmute(vpaddq_s32(transmute(a), transmute(b))) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + transmute(vpaddq_s64(transmute(a), transmute(b))) +} + +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" + )] + fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpmaxnm_f32(a, b) +} + +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" + )] + fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vpmaxnm_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" + )] + fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpmaxnmq_f32(a, b) +} + +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" + )] + fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vpmaxnmq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" + )] + fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpmaxnmq_f64(a, b) +} + +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" + )] + fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vpmaxnmq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" + )] + fn _vpmaxnmqd_f64(a: float64x2_t) -> f64; + } + _vpmaxnmqd_f64(a) +} + +#[doc = "Floating-point maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" + )] + fn _vpmaxnmqd_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vpmaxnmqd_f64(a) +} + +#[doc = "Floating-point maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" + )] + fn _vpmaxnms_f32(a: float32x2_t) -> f32; + } + _vpmaxnms_f32(a) +} + +#[doc = "Floating-point maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" + )] + fn _vpmaxnms_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vpmaxnms_f32(a) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v4f32" + )] + fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpmaxq_f32(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v4f32" + )] + fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vpmaxq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v2f64" + )] + fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpmaxq_f64(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v2f64" + )] + fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vpmaxq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v16i8" + )] + fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpmaxq_s8(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v16i8" + )] + fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vpmaxq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v8i16" + )] + fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpmaxq_s16(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v8i16" + )] + fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vpmaxq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v4i32" + )] + fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpmaxq_s32(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v4i32" + )] + fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vpmaxq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v16i8" + )] + fn _vpmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v16i8" + )] + fn _vpmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vpmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v8i16" + )] + fn _vpmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v8i16" + )] + fn _vpmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v4i32" + )] + fn _vpmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v4i32" + )] + fn _vpmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" + )] + fn _vpmaxqd_f64(a: float64x2_t) -> f64; + } + _vpmaxqd_f64(a) +} + +#[doc = "Floating-point maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" + )] + fn _vpmaxqd_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vpmaxqd_f64(a) +} + +#[doc = "Floating-point maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" + )] + fn _vpmaxs_f32(a: float32x2_t) -> f32; + } + _vpmaxs_f32(a) +} + +#[doc = "Floating-point maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" + )] + fn _vpmaxs_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vpmaxs_f32(a) +} + +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v2f32" + )] + fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpminnm_f32(a, b) +} + +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v2f32" + )] + fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vpminnm_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v4f32" + )] + fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpminnmq_f32(a, b) +} + +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v4f32" + )] + fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vpminnmq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v2f64" + )] + fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpminnmq_f64(a, b) +} + +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v2f64" + )] + fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vpminnmq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" + )] + fn _vpminnmqd_f64(a: float64x2_t) -> f64; + } + _vpminnmqd_f64(a) +} + +#[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" + )] + fn _vpminnmqd_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vpminnmqd_f64(a) +} + +#[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" + )] + fn _vpminnms_f32(a: float32x2_t) -> f32; + } + _vpminnms_f32(a) +} + +#[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" + )] + fn _vpminnms_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vpminnms_f32(a) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v4f32" + )] + fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpminq_f32(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v4f32" + )] + fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vpminq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f64" + )] + fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpminq_f64(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f64" + )] + fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vpminq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v16i8" + )] + fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpminq_s8(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v16i8" + )] + fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vpminq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v8i16" + )] + fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpminq_s16(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v8i16" + )] + fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vpminq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v4i32" + )] + fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpminq_s32(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v4i32" + )] + fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vpminq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v16i8" + )] + fn _vpminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpminq_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v16i8" + )] + fn _vpminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vpminq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i16" + )] + fn _vpminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i16" + )] + fn _vpminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i32" + )] + fn _vpminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i32" + )] + fn _vpminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f64.v2f64" + )] + fn _vpminqd_f64(a: float64x2_t) -> f64; + } + _vpminqd_f64(a) +} + +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f64.v2f64" + )] + fn _vpminqd_f64(a: float64x2_t) -> f64; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + _vpminqd_f64(a) +} + +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v2f32" + )] + fn _vpmins_f32(a: float32x2_t) -> f32; + } + _vpmins_f32(a) +} + +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v2f32" + )] + fn _vpmins_f32(a: float32x2_t) -> f32; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + _vpmins_f32(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v1i64" + )] + fn _vqabs_s64(a: int64x1_t) -> int64x1_t; + } + _vqabs_s64(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v2i64" + )] + fn _vqabsq_s64(a: int64x2_t) -> int64x2_t; + } + _vqabsq_s64(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v2i64" + )] + fn _vqabsq_s64(a: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vqabsq_s64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsb_s8(a: i8) -> i8 { + simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) +} + +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsh_s16(a: i16) -> i16 { + simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) +} + +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabss_s32(a: i32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.i32" + )] + fn _vqabss_s32(a: i32) -> i32; + } + _vqabss_s32(a) +} + +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsd_s64(a: i64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.i64" + )] + fn _vqabsd_s64(a: i64) -> i64; + } + _vqabsd_s64(a) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 { + let a: int8x8_t = vdup_n_s8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqadd_s8(a, b), 0) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqadd_s16(a, b), 0) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 { + let a: uint8x8_t = vdup_n_u8(a); + let b: uint8x8_t = vdup_n_u8(b); + simd_extract!(vqadd_u8(a, b), 0) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { + let a: uint16x4_t = vdup_n_u16(a); + let b: uint16x4_t = vdup_n_u16(b); + simd_extract!(vqadd_u16(a, b), 0) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.i32" + )] + fn _vqadds_s32(a: i32, b: i32) -> i32; + } + _vqadds_s32(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.i64" + )] + fn _vqaddd_s64(a: i64, b: i64) -> i64; + } + _vqaddd_s64(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.i32" + )] + fn _vqadds_u32(a: i32, b: i32) -> i32; + } + _vqadds_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.i64" + )] + fn _vqaddd_u64(a: i64, b: i64) -> i64; + } + _vqaddd_u64(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vqaddq_s32(a, vqdmull_high_n_s16(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_n_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + vqaddq_s32(a, vqdmull_high_s16(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vqaddq_s64(a, vqdmull_high_n_s32(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_n_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + vqaddq_s64(a, vqdmull_high_s32(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, N = 2))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqaddq_s32(a, vqdmull_laneq_s16::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, N = 2))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_laneq_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s64(a, vqdmull_laneq_s32::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlal_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_laneq_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { + static_assert_uimm_bits!(LANE, 3); + vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { + static_assert_uimm_bits!(LANE, 1); + vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { + static_assert_uimm_bits!(LANE, 2); + vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { + let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); + vqadds_s32(a, simd_extract!(x, 0)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { + let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c)); + x as i64 +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vqsubq_s32(a, vqdmull_high_n_s16(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_n_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + vqsubq_s32(a, vqdmull_high_s16(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vqsubq_s64(a, vqdmull_high_n_s32(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_n_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + vqsubq_s64(a, vqdmull_high_s32(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqsubq_s32(a, vqdmull_laneq_s16::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_laneq_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s64(a, vqdmull_laneq_s32::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsl_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_laneq_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { + static_assert_uimm_bits!(LANE, 3); + vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { + static_assert_uimm_bits!(LANE, 1); + vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { + static_assert_uimm_bits!(LANE, 2); + vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { + let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); + vqsubs_s32(a, simd_extract!(x, 0)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { + let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c)); + x as i64 +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { + static_assert_uimm_bits!(N, 2); + let b: i16 = simd_extract!(b, N as u32); + vqdmulhh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { + static_assert_uimm_bits!(N, 2); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: i16 = simd_extract!(b, N as u32); + vqdmulhh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { + static_assert_uimm_bits!(N, 3); + let b: i16 = simd_extract!(b, N as u32); + vqdmulhh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { + static_assert_uimm_bits!(N, 3); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i16 = simd_extract!(b, N as u32); + vqdmulhh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqdmulh_s16(a, b), 0) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { + let a: int32x2_t = vdup_n_s32(a); + let b: int32x2_t = vdup_n_s32(b); + simd_extract!(vqdmulh_s32(a, b), 0) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { + static_assert_uimm_bits!(N, 1); + let b: i32 = simd_extract!(b, N as u32); + vqdmulhs_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { + static_assert_uimm_bits!(N, 1); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: i32 = simd_extract!(b, N as u32); + vqdmulhs_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + let b: i32 = simd_extract!(b, N as u32); + vqdmulhs_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: i32 = simd_extract!(b, N as u32); + vqdmulhs_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + let ret_val: int32x4_t = vqdmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + let ret_val: int64x2_t = vqdmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + let ret_val: int64x2_t = vqdmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 4))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 4))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + let ret_val: int32x4_t = vqdmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = vdup_n_s16(b); + vqdmull_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = vdup_n_s16(b); + let ret_val: int32x4_t = vqdmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = vdup_n_s32(b); + vqdmull_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = vdup_n_s32(b); + let ret_val: int64x2_t = vqdmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + vqdmull_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let ret_val: int32x4_t = vqdmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + vqdmull_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let ret_val: int64x2_t = vqdmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + let ret_val: int32x4_t = vqdmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + let ret_val: int64x2_t = vqdmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + let b: i16 = simd_extract!(b, N as u32); + vqdmullh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: i16 = simd_extract!(b, N as u32); + vqdmullh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { + static_assert_uimm_bits!(N, 2); + let b: i32 = simd_extract!(b, N as u32); + vqdmulls_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { + static_assert_uimm_bits!(N, 2); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: i32 = simd_extract!(b, N as u32); + vqdmulls_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { + static_assert_uimm_bits!(N, 3); + let b: i16 = simd_extract!(b, N as u32); + vqdmullh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { + static_assert_uimm_bits!(N, 3); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: i16 = simd_extract!(b, N as u32); + vqdmullh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqdmull_s16(a, b), 0) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { + static_assert_uimm_bits!(N, 1); + let b: i32 = simd_extract!(b, N as u32); + vqdmulls_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { + static_assert_uimm_bits!(N, 1); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: i32 = simd_extract!(b, N as u32); + vqdmulls_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulls.scalar" + )] + fn _vqdmulls_s32(a: i32, b: i32) -> i64; + } + _vqdmulls_s32(a, b) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + simd_shuffle!( + a, + vqmovn_s16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x16_t = simd_shuffle!( + a, + vqmovn_s16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + simd_shuffle!( + a, + vqmovn_u16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + vqmovn_u16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovnd_s64(a: i64) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64" + )] + fn _vqmovnd_s64(a: i64) -> i32; + } + _vqmovnd_s64(a) +} + +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovnd_u64(a: u64) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64" + )] + fn _vqmovnd_u64(a: i64) -> i32; + } + _vqmovnd_u64(a.as_signed()).as_unsigned() +} + +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovnh_s16(a: i16) -> i8 { + simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) +} + +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovns_s32(a: i32) -> i16 { + simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) +} + +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovnh_u16(a: u16) -> u8 { + simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) +} + +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovns_u32(a: u32) -> u16 { + simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + simd_shuffle!( + a, + vqmovun_s16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + vqmovun_s16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovunh_s16(a: i16) -> u8 { + simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovuns_s32(a: i32) -> u16 { + simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqmovund_s64(a: i64) -> u32 { + simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v1i64" + )] + fn _vqneg_s64(a: int64x1_t) -> int64x1_t; + } + _vqneg_s64(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v2i64" + )] + fn _vqnegq_s64(a: int64x2_t) -> int64x2_t; + } + _vqnegq_s64(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v2i64" + )] + fn _vqnegq_s64(a: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = _vqnegq_s64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegb_s8(a: i8) -> i8 { + simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegh_s16(a: i16) -> i16 { + simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegs_s32(a: i32) -> i32 { + simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegd_s64(a: i64) -> i64 { + simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlah_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int16x4_t = vqrdmlah_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlah_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vqrdmlah_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlah_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int16x4_t = vqrdmlah_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlah_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vqrdmlah_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlahq_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + let ret_val: int16x8_t = vqrdmlahq_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlahq_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vqrdmlahq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlahq_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + let ret_val: int16x8_t = vqrdmlahq_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlahq_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vqrdmlahq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" + )] + fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + _vqrdmlah_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" + )] + fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqrdmlah_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" + )] + fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _vqrdmlahq_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" + )] + fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqrdmlahq_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" + )] + fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + _vqrdmlah_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" + )] + fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x2_t = _vqrdmlah_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" + )] + fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vqrdmlahq_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" + )] + fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqrdmlahq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + let c: int16x4_t = vdup_n_s16(c); + simd_extract!(vqrdmlah_s16(a, b, c), 0) +} + +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 { + let a: int32x2_t = vdup_n_s32(a); + let b: int32x2_t = vdup_n_s32(b); + let c: int32x2_t = vdup_n_s32(c); + simd_extract!(vqrdmlah_s32(a, b, c), 0) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlsh_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int16x4_t = vqrdmlsh_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlsh_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vqrdmlsh_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlsh_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int16x4_t = vqrdmlsh_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlsh_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vqrdmlsh_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlshq_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + let ret_val: int16x8_t = vqrdmlshq_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlshq_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vqrdmlshq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlshq_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + let ret_val: int16x8_t = vqrdmlshq_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlshq_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vqrdmlshq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" + )] + fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + _vqrdmlsh_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" + )] + fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqrdmlsh_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" + )] + fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _vqrdmlshq_s16(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" + )] + fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqrdmlshq_s16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" + )] + fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + _vqrdmlsh_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" + )] + fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x2_t = _vqrdmlsh_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" + )] + fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vqrdmlshq_s32(a, b, c) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" + )] + fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqrdmlshq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + let c: int16x4_t = vdup_n_s16(c); + simd_extract!(vqrdmlsh_s16(a, b, c), 0) +} + +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { + let a: int32x2_t = vdup_n_s32(a); + let b: int32x2_t = vdup_n_s32(b); + let c: int32x2_t = vdup_n_s32(c); + simd_extract!(vqrdmlsh_s32(a, b, c), 0) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { + simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { + simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 { + let a: int8x8_t = vdup_n_s8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqrshl_s8(a, b), 0) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqrshl_s16(a, b), 0) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 { + let a: uint8x8_t = vdup_n_u8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqrshl_u8(a, b), 0) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { + let a: uint16x4_t = vdup_n_u16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqrshl_u16(a, b), 0) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.i64" + )] + fn _vqrshld_s64(a: i64, b: i64) -> i64; + } + _vqrshld_s64(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.i32" + )] + fn _vqrshls_s32(a: i32, b: i32) -> i32; + } + _vqrshls_s32(a, b) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.i32" + )] + fn _vqrshls_u32(a: i32, b: i32) -> i32; + } + _vqrshls_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.i64" + )] + fn _vqrshld_u64(a: i64, b: i64) -> i64; + } + _vqrshld_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqrshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x16_t = simd_shuffle!( + a, + vqrshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqrshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + vqrshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { + static_assert!(N >= 1 && N <= 32); + let a: uint64x2_t = vdupq_n_u64(a); + simd_extract!(vqrshrn_n_u64::(a), 0) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { + static_assert!(N >= 1 && N <= 8); + let a: uint16x8_t = vdupq_n_u16(a); + simd_extract!(vqrshrn_n_u16::(a), 0) +} + +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { + static_assert!(N >= 1 && N <= 16); + let a: uint32x4_t = vdupq_n_u32(a); + simd_extract!(vqrshrn_n_u32::(a), 0) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { + static_assert!(N >= 1 && N <= 8); + let a: int16x8_t = vdupq_n_s16(a); + simd_extract!(vqrshrn_n_s16::(a), 0) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { + static_assert!(N >= 1 && N <= 16); + let a: int32x4_t = vdupq_n_s32(a); + simd_extract!(vqrshrn_n_s32::(a), 0) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { + static_assert!(N >= 1 && N <= 32); + let a: int64x2_t = vdupq_n_s64(a); + simd_extract!(vqrshrn_n_s64::(a), 0) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqrshrun_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + vqrshrun_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { + static_assert!(N >= 1 && N <= 32); + let a: int64x2_t = vdupq_n_s64(a); + simd_extract!(vqrshrun_n_s64::(a), 0) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { + static_assert!(N >= 1 && N <= 8); + let a: int16x8_t = vdupq_n_s16(a); + simd_extract!(vqrshrun_n_s16::(a), 0) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { + static_assert!(N >= 1 && N <= 16); + let a: int32x4_t = vdupq_n_s32(a); + simd_extract!(vqrshrun_n_s32::(a), 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(vqshl_n_s8::(vdup_n_s8(a)), 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshld_n_s64(a: i64) -> i64 { + static_assert_uimm_bits!(N, 6); + simd_extract!(vqshl_n_s64::(vdup_n_s64(a)), 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { + static_assert_uimm_bits!(N, 4); + simd_extract!(vqshl_n_s16::(vdup_n_s16(a)), 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshls_n_s32(a: i32) -> i32 { + static_assert_uimm_bits!(N, 5); + simd_extract!(vqshl_n_s32::(vdup_n_s32(a)), 0) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(vqshl_n_u8::(vdup_n_u8(a)), 0) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshld_n_u64(a: u64) -> u64 { + static_assert_uimm_bits!(N, 6); + simd_extract!(vqshl_n_u64::(vdup_n_u64(a)), 0) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { + static_assert_uimm_bits!(N, 4); + simd_extract!(vqshl_n_u16::(vdup_n_u16(a)), 0) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshls_n_u32(a: u32) -> u32 { + static_assert_uimm_bits!(N, 5); + simd_extract!(vqshl_n_u32::(vdup_n_u32(a)), 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 { + let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b)); + simd_extract!(c, 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 { + let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b)); + simd_extract!(c, 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 { + let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b)); + simd_extract!(c, 0) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 { + let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b)); + simd_extract!(c, 0) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 { + let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b)); + simd_extract!(c, 0) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { + let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b)); + simd_extract!(c, 0) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.i64" + )] + fn _vqshld_s64(a: i64, b: i64) -> i64; + } + _vqshld_s64(a, b) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.i64" + )] + fn _vqshld_u64(a: i64, b: i64) -> i64; + } + _vqshld_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(vqshlu_n_s8::(vdup_n_s8(a)), 0) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { + static_assert_uimm_bits!(N, 6); + simd_extract!(vqshlu_n_s64::(vdup_n_s64(a)), 0) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { + static_assert_uimm_bits!(N, 4); + simd_extract!(vqshlu_n_s16::(vdup_n_s16(a)), 0) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { + static_assert_uimm_bits!(N, 5); + simd_extract!(vqshlu_n_s32::(vdup_n_s32(a)), 0) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x16_t = simd_shuffle!( + a, + vqshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + vqshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.i32" + )] + fn _vqshrnd_n_s64(a: i64, n: i32) -> i32; + } + _vqshrnd_n_s64(a, N) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.i32" + )] + fn _vqshrnd_n_u64(a: i64, n: i32) -> i32; + } + _vqshrnd_n_u64(a.as_signed(), N).as_unsigned() +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { + static_assert!(N >= 1 && N <= 8); + simd_extract!(vqshrn_n_s16::(vdupq_n_s16(a)), 0) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { + static_assert!(N >= 1 && N <= 16); + simd_extract!(vqshrn_n_s32::(vdupq_n_s32(a)), 0) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { + static_assert!(N >= 1 && N <= 8); + simd_extract!(vqshrn_n_u16::(vdupq_n_u16(a)), 0) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { + static_assert!(N >= 1 && N <= 16); + simd_extract!(vqshrn_n_u32::(vdupq_n_u32(a)), 0) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqshrun_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + vqshrun_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { + static_assert!(N >= 1 && N <= 32); + simd_extract!(vqshrun_n_s64::(vdupq_n_s64(a)), 0) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { + static_assert!(N >= 1 && N <= 8); + simd_extract!(vqshrun_n_s16::(vdupq_n_s16(a)), 0) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { + static_assert!(N >= 1 && N <= 16); + simd_extract!(vqshrun_n_s32::(vdupq_n_s32(a)), 0) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 { + let a: int8x8_t = vdup_n_s8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqsub_s8(a, b), 0) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqsub_s16(a, b), 0) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 { + let a: uint8x8_t = vdup_n_u8(a); + let b: uint8x8_t = vdup_n_u8(b); + simd_extract!(vqsub_u8(a, b), 0) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { + let a: uint16x4_t = vdup_n_u16(a); + let b: uint16x4_t = vdup_n_u16(b); + simd_extract!(vqsub_u16(a, b), 0) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.i32" + )] + fn _vqsubs_s32(a: i32, b: i32) -> i32; + } + _vqsubs_s32(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.i64" + )] + fn _vqsubd_s64(a: i64, b: i64) -> i64; + } + _vqsubd_s64(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.i32" + )] + fn _vqsubs_u32(a: i32, b: i32) -> i32; + } + _vqsubs_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.i64" + )] + fn _vqsubd_u64(a: i64, b: i64) -> i64; + } + _vqsubd_u64(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl1.v8i8" + )] + fn _vqtbl1(a: int8x16_t, b: int8x8_t) -> int8x8_t; + } + _vqtbl1(a, b.as_signed()) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl1.v8i8" + )] + fn _vqtbl1(a: int8x16_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbl1(a, b.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl1.v16i8" + )] + fn _vqtbl1q(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqtbl1q(a, b.as_signed()) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl1.v16i8" + )] + fn _vqtbl1q(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbl1q(a, b.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { + vqtbl1(a, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbl1(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + vqtbl1q(a, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbl1q(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { + let x = transmute(vqtbl1(transmute(a), b)); + x +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = transmute(vqtbl1(transmute(a), b)); + let ret_val: uint8x8_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let x = transmute(vqtbl1q(transmute(a), b)); + x +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let x = transmute(vqtbl1q(transmute(a), b)); + let ret_val: uint8x16_t = x; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { + let x = transmute(vqtbl1(transmute(a), b)); + x +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = transmute(vqtbl1(transmute(a), b)); + let ret_val: poly8x8_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { + let x = transmute(vqtbl1q(transmute(a), b)); + x +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let x = transmute(vqtbl1q(transmute(a), b)); + let ret_val: poly8x16_t = x; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl2.v8i8" + )] + fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; + } + _vqtbl2(a, b, c.as_signed()) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl2.v8i8" + )] + fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbl2(a, b, c.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl2.v16i8" + )] + fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _vqtbl2q(a, b, c.as_signed()) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl2.v16i8" + )] + fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbl2q(a, b, c.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { + vqtbl2(a.0, a.1, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { + let mut a: int8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbl2(a.0, a.1, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { + vqtbl2q(a.0, a.1, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { + let mut a: int8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbl2q(a.0, a.1, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { + transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { + transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl3.v8i8" + )] + fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; + } + _vqtbl3(a, b, c, d.as_signed()) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl3.v8i8" + )] + fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbl3(a, b, c, d.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl3.v16i8" + )] + fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; + } + _vqtbl3q(a, b, c, d.as_signed()) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl3.v16i8" + )] + fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbl3q(a, b, c, d.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { + vqtbl3(a.0, a.1, a.2, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { + let mut a: int8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbl3(a.0, a.1, a.2, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { + vqtbl3q(a.0, a.1, a.2, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { + let mut a: int8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbl3q(a.0, a.1, a.2, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { + transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { + transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl4.v8i8" + )] + fn _vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) + -> int8x8_t; } + _vqtbl4(a, b, c, d, e.as_signed()) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_p16( - a: poly16x4_t, - b: poly16x8_t, -) -> poly16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 3); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), +unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl4.v8i8" + )] + fn _vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) + -> int8x8_t; } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let e: uint8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbl4(a, b, c, d, e.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p16( - a: poly16x8_t, - b: poly16x8_t, -) -> poly16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), +unsafe fn vqtbl4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: uint8x16_t, +) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl4.v16i8" + )] + fn _vqtbl4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + ) -> int8x16_t; + } + _vqtbl4q(a, b, c, d, e.as_signed()) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: uint8x16_t, +) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl4.v16i8" + )] + fn _vqtbl4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + ) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let e: uint8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbl4q(a, b, c, d, e.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { + vqtbl4(a.0, a.1, a.2, a.3, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { + let mut a: int8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbl4(a.0, a.1, a.2, a.3, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { + vqtbl4q(a.0, a.1, a.2, a.3, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { + let mut a: int8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbl4q(a.0, a.1, a.2, a.3, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { + transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { + transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx1.v8i8" + )] + fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; + } + _vqtbx1(a, b, c.as_signed()) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx1.v8i8" + )] + fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbx1(a, b, c.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p64( - a: poly64x2_t, - b: poly64x2_t, -) -> poly64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), +unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx1.v16i8" + )] + fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; } + _vqtbx1q(a, b, c.as_signed()) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_f32( - a: float32x4_t, - b: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 1); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), +unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx1.v16i8" + )] + fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbx1q(a, b, c.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_f64( - a: float64x2_t, - b: float64x1_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + vqtbx1(a, b, c) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s64( - a: int64x2_t, - b: int64x1_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbx1(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u64( - a: uint64x2_t, - b: uint64x1_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + vqtbx1q(a, b, c) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p64( - a: poly64x2_t, - b: poly64x1_t, -) -> poly64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } +pub unsafe fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbx1q(a, b, c); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { - transmute(a) +pub unsafe fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { + let x = transmute(vqtbx1(transmute(a), transmute(b), c)); + x } -#[doc = "Floating-point convert to lower precision narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtn))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { - simd_cast(a) +pub unsafe fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = transmute(vqtbx1(transmute(a), transmute(b), c)); + let ret_val: uint8x8_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point convert to higher precision long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtl))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { - simd_cast(a) +pub unsafe fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); + x } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { - simd_cast(a) +pub unsafe fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); + let ret_val: uint8x16_t = x; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { - simd_cast(a) +pub unsafe fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { + let x = transmute(vqtbx1(transmute(a), transmute(b), c)); + x } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { - simd_cast(a) +pub unsafe fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = transmute(vqtbx1(transmute(a), transmute(b), c)); + let ret_val: poly8x8_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { - simd_cast(a) +pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { + let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); + x } -#[doc = "Floating-point convert to lower precision narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtn))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { - simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) +pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); + let ret_val: poly8x16_t = x; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point convert to higher precision long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtl))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { - let b: float32x2_t = simd_shuffle!(a, a, [2, 3]); - simd_cast(b) +unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx2.v8i8" + )] + fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; + } + _vqtbx2(a, b, c, d.as_signed()) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { +unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64" + link_name = "llvm.aarch64.neon.tbx2.v8i8" )] - fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t; + fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; } - _vcvt_n_f64_s64(a, N) + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbx2(a, b, c, d.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { +unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" + link_name = "llvm.aarch64.neon.tbx2.v16i8" )] - fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t; + fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; } - _vcvtq_n_f64_s64(a, N) + _vqtbx2q(a, b, c, d.as_signed()) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { +unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64" + link_name = "llvm.aarch64.neon.tbx2.v16i8" )] - fn _vcvt_n_f64_u64(a: int64x1_t, n: i32) -> float64x1_t; + fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; } - _vcvt_n_f64_u64(a.as_signed(), N) + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbx2q(a, b, c, d.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { + vqtbx2(a, b.0, b.1, c) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { + let mut b: int8x16x2_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbx2(a, b.0, b.1, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { + vqtbx2q(a, b.0, b.1, c) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" - )] - fn _vcvtq_n_f64_u64(a: int64x2_t, n: i32) -> float64x2_t; - } - _vcvtq_n_f64_u64(a.as_signed(), N) +pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { + let mut b: int8x16x2_t = b; + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbx2q(a, b.0, b.1, c); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64" - )] - fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t; - } - _vcvt_n_s64_f64(a, N) +pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" - )] - fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t; - } - _vcvtq_n_s64_f64(a, N) +pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x2_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64" - )] - fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> int64x1_t; - } - _vcvt_n_u64_f64(a, N).as_unsigned() +pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { + transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" - )] - fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> int64x2_t; - } - _vcvtq_n_u64_f64(a, N).as_unsigned() +pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x2_t = b; + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v1i64.v1f64" - )] - fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvt_s64_f64(a) +pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v2i64.v2f64" - )] - fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtq_s64_f64(a) +pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x2_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v1i64.v1f64" - )] - fn _vcvt_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvt_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { + transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v2i64.v2f64" - )] - fn _vcvtq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtq_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x2_t = b; + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" + link_name = "llvm.aarch64.neon.tbx3.v8i8" )] - fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t; + fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) -> int8x8_t; } - _vcvta_s32_f32(a) + _vqtbx3(a, b, c, d, e.as_signed()) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" + link_name = "llvm.aarch64.neon.tbx3.v8i8" )] - fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t; + fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) -> int8x8_t; } - _vcvtaq_s32_f32(a) + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let e: uint8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbx3(a, b, c, d, e.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx3q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: uint8x16_t, +) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64" + link_name = "llvm.aarch64.neon.tbx3.v16i8" )] - fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t; + fn _vqtbx3q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + ) -> int8x16_t; } - _vcvta_s64_f64(a) + _vqtbx3q(a, b, c, d, e.as_signed()) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx3q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: uint8x16_t, +) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" + link_name = "llvm.aarch64.neon.tbx3.v16i8" )] - fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t; + fn _vqtbx3q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + ) -> int8x16_t; } - _vcvtaq_s64_f64(a) + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let e: uint8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbx3q(a, b, c, d, e.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" - )] - fn _vcvta_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvta_u32_f32(a).as_unsigned() +pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { + vqtbx3(a, b.0, b.1, b.2, c) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" - )] - fn _vcvtaq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtaq_u32_f32(a).as_unsigned() +pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { + let mut b: int8x16x3_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbx3(a, b.0, b.1, b.2, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64" - )] - fn _vcvta_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvta_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { + vqtbx3q(a, b.0, b.1, b.2, c) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { + let mut b: int8x16x3_t = b; + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbx3q(a, b.0, b.1, b.2, c); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" - )] - fn _vcvtaq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtaq_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.i32.f32" - )] - fn _vcvtas_s32_f32(a: f32) -> i32; - } - _vcvtas_s32_f32(a) +pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x3_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.i64.f64" - )] - fn _vcvtad_s64_f64(a: f64) -> i64; - } - _vcvtad_s64_f64(a) +pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { + transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.i32.f32" - )] - fn _vcvtas_u32_f32(a: f32) -> i32; - } - _vcvtas_u32_f32(a).as_unsigned() +pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x3_t = b; + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.i64.f64" - )] - fn _vcvtad_u64_f64(a: f64) -> i64; - } - _vcvtad_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { - a as f64 +pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x3_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { - a as f32 +pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { + transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" - )] - fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtm_s32_f32(a) +pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x3_t = b; + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx4( + a: int8x8_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: uint8x8_t, +) -> int8x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" + link_name = "llvm.aarch64.neon.tbx4.v8i8" )] - fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t; + fn _vqtbx4( + a: int8x8_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: int8x8_t, + ) -> int8x8_t; } - _vcvtmq_s32_f32(a) + _vqtbx4(a, b, c, d, e, f.as_signed()) } -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx4( + a: int8x8_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: uint8x8_t, +) -> int8x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64" + link_name = "llvm.aarch64.neon.tbx4.v8i8" )] - fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t; + fn _vqtbx4( + a: int8x8_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: int8x8_t, + ) -> int8x8_t; } - _vcvtm_s64_f64(a) + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let e: int8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let f: uint8x8_t = simd_shuffle!(f, f, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqtbx4(a, b, c, d, e, f.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: uint8x16_t, +) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" + link_name = "llvm.aarch64.neon.tbx4.v16i8" )] - fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t; + fn _vqtbx4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: int8x16_t, + ) -> int8x16_t; } - _vcvtmq_s64_f64(a) + _vqtbx4q(a, b, c, d, e, f.as_signed()) } -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { +unsafe fn vqtbx4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: uint8x16_t, +) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" + link_name = "llvm.aarch64.neon.tbx4.v16i8" )] - fn _vcvtm_u32_f32(a: float32x2_t) -> int32x2_t; + fn _vqtbx4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: int8x16_t, + ) -> int8x16_t; } - _vcvtm_u32_f32(a).as_unsigned() + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let e: int8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let f: uint8x16_t = simd_shuffle!(f, f, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqtbx4q(a, b, c, d, e, f.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" - )] - fn _vcvtmq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtmq_u32_f32(a).as_unsigned() +pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { + vqtbx4(a, b.0, b.1, b.2, b.3, c) } -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64" - )] - fn _vcvtm_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtm_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { + let mut b: int8x16x4_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbx4(a, b.0, b.1, b.2, b.3, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" - )] - fn _vcvtmq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtmq_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { + vqtbx4q(a, b.0, b.1, b.2, b.3, c) } -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.i32.f32" - )] - fn _vcvtms_s32_f32(a: f32) -> i32; - } - _vcvtms_s32_f32(a) +pub unsafe fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { + let mut b: int8x16x4_t = b; + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqtbx4q(a, b.0, b.1, b.2, b.3, c); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.i64.f64" - )] - fn _vcvtmd_s64_f64(a: f64) -> i64; - } - _vcvtmd_s64_f64(a) +pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.i32.f32" - )] - fn _vcvtms_u32_f32(a: f32) -> i32; - } - _vcvtms_u32_f32(a).as_unsigned() +pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x4_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.i64.f64" - )] - fn _vcvtmd_u64_f64(a: f64) -> i64; - } - _vcvtmd_u64_f64(a).as_unsigned() +pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { + transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" - )] - fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtn_s32_f32(a) +pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x4_t = b; + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" - )] - fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtnq_s32_f32(a) +pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64" - )] - fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtn_s64_f64(a) +pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x4_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" - )] - fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtnq_s64_f64(a) +pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { + transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" - )] - fn _vcvtn_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtn_u32_f32(a).as_unsigned() +pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x4_t = b; + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] + +#[doc = "Rotate and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(rax1))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" + link_name = "llvm.aarch64.crypto.rax1" )] - fn _vcvtnq_u32_f32(a: float32x4_t) -> int32x4_t; + fn _vrax1q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - _vcvtnq_u32_f32(a).as_unsigned() + _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"] + +#[doc = "Rotate and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(rax1))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64" + link_name = "llvm.aarch64.crypto.rax1" )] - fn _vcvtn_u64_f64(a: float64x1_t) -> int64x1_t; + fn _vrax1q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - _vcvtn_u64_f64(a).as_unsigned() + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" + link_name = "llvm.aarch64.neon.rbit.v8i8" )] - fn _vcvtnq_u64_f64(a: float64x2_t) -> int64x2_t; + fn _vrbit_s8(a: int8x8_t) -> int8x8_t; } - _vcvtnq_u64_f64(a).as_unsigned() + _vrbit_s8(a) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.i32.f32" + link_name = "llvm.aarch64.neon.rbit.v8i8" )] - fn _vcvtns_s32_f32(a: f32) -> i32; + fn _vrbit_s8(a: int8x8_t) -> int8x8_t; } - _vcvtns_s32_f32(a) + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vrbit_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.i64.f64" + link_name = "llvm.aarch64.neon.rbit.v16i8" )] - fn _vcvtnd_s64_f64(a: f64) -> i64; + fn _vrbitq_s8(a: int8x16_t) -> int8x16_t; } - _vcvtnd_s64_f64(a) + _vrbitq_s8(a) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.i32.f32" + link_name = "llvm.aarch64.neon.rbit.v16i8" )] - fn _vcvtns_u32_f32(a: f32) -> i32; + fn _vrbitq_s8(a: int8x16_t) -> int8x16_t; } - _vcvtns_u32_f32(a).as_unsigned() + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vrbitq_s8(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.i64.f64" - )] - fn _vcvtnd_u64_f64(a: f64) -> i64; - } - _vcvtnd_u64_f64(a).as_unsigned() +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { + transmute(vrbit_s8(transmute(a))) } -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" - )] - fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtp_s32_f32(a) +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" - )] - fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtpq_s32_f32(a) +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { + transmute(vrbitq_s8(transmute(a))) } -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64" - )] - fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtp_s64_f64(a) +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" - )] - fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtpq_s64_f64(a) +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { + transmute(vrbit_s8(transmute(a))) } -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" - )] - fn _vcvtp_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtp_u32_f32(a).as_unsigned() +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" - )] - fn _vcvtpq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtpq_u32_f32(a).as_unsigned() +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { + transmute(vrbitq_s8(transmute(a))) } -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"] + +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64" - )] - fn _vcvtp_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtp_u64_f64(a).as_unsigned() +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] +#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { +pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" + link_name = "llvm.aarch64.neon.frecpe.v1f64" )] - fn _vcvtpq_u64_f64(a: float64x2_t) -> int64x2_t; + fn _vrecpe_f64(a: float64x1_t) -> float64x1_t; } - _vcvtpq_u64_f64(a).as_unsigned() + _vrecpe_f64(a) } -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"] + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] +#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { +pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.i32.f32" + link_name = "llvm.aarch64.neon.frecpe.v2f64" )] - fn _vcvtps_s32_f32(a: f32) -> i32; + fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t; } - _vcvtps_s32_f32(a) + _vrecpeq_f64(a) } -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"] + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] +#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { +pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.i64.f64" + link_name = "llvm.aarch64.neon.frecpe.v2f64" )] - fn _vcvtpd_s64_f64(a: f64) -> i64; + fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t; } - _vcvtpd_s64_f64(a) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrecpeq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"] + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] +#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { +pub unsafe fn vrecped_f64(a: f64) -> f64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.i32.f32" + link_name = "llvm.aarch64.neon.frecpe.f64" )] - fn _vcvtps_u32_f32(a: f32) -> i32; + fn _vrecped_f64(a: f64) -> f64; } - _vcvtps_u32_f32(a).as_unsigned() + _vrecped_f64(a) } -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"] + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] +#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { +pub unsafe fn vrecpes_f32(a: f32) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.i64.f64" + link_name = "llvm.aarch64.neon.frecpe.f32" )] - fn _vcvtpd_u64_f64(a: f64) -> i64; + fn _vrecpes_f32(a: f32) -> f32; } - _vcvtpd_u64_f64(a).as_unsigned() -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { - a as f32 -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { - a as f64 + _vrecpes_f32(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"] + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { +pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32" + link_name = "llvm.aarch64.neon.frecps.v1f64" )] - fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32; + fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; } - _vcvts_n_f32_s32(a, N) + _vrecps_f64(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"] + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { +pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64" + link_name = "llvm.aarch64.neon.frecps.v2f64" )] - fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64; + fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; } - _vcvtd_n_f64_s64(a, N) + _vrecpsq_f64(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"] + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32" + link_name = "llvm.aarch64.neon.frecps.v2f64" )] - fn _vcvts_n_f32_u32(a: i32, n: i32) -> f32; + fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; } - _vcvts_n_f32_u32(a.as_signed(), N) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vrecpsq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"] + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { +pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64" + link_name = "llvm.aarch64.neon.frecps.f64" )] - fn _vcvtd_n_f64_u64(a: i64, n: i32) -> f64; + fn _vrecpsd_f64(a: f64, b: f64) -> f64; } - _vcvtd_n_f64_u64(a.as_signed(), N) + _vrecpsd_f64(a, b) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"] + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32" + link_name = "llvm.aarch64.neon.frecps.f32" )] - fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32; + fn _vrecpss_f32(a: f32, b: f32) -> f32; } - _vcvts_n_s32_f32(a, N) + _vrecpss_f32(a, b) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"] + +#[doc = "Floating-point reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { +pub unsafe fn vrecpxd_f64(a: f64) -> f64 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64" + link_name = "llvm.aarch64.neon.frecpx.f64" )] - fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64; + fn _vrecpxd_f64(a: f64) -> f64; } - _vcvtd_n_s64_f64(a, N) + _vrecpxd_f64(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"] + +#[doc = "Floating-point reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +pub unsafe fn vrecpxs_f32(a: f32) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32" + link_name = "llvm.aarch64.neon.frecpx.f32" )] - fn _vcvts_n_u32_f32(a: f32, n: i32) -> i32; + fn _vrecpxs_f32(a: f32) -> f32; } - _vcvts_n_u32_f32(a, N).as_unsigned() + _vrecpxs_f32(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64" - )] - fn _vcvtd_n_u64_f64(a: f64, n: i32) -> i64; - } - _vcvtd_n_u64_f64(a, N).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { + transmute(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { - a as i32 +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { - a as i64 +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { + transmute(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { - a as u32 +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { - a as u64 +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { + transmute(a) } -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" - )] - fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t; - } - _vcvtx_f32_f64(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { - simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { - simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_div(a, b) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { + transmute(a) } -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_div(a, b) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - simd_div(a, b) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { + transmute(a) } -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_div(a, b) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(sdot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdot_laneq_s32( - a: int32x2_t, - b: int8x8_t, - c: int8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_s32(a, b, transmute(c)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { + transmute(a) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(sdot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdotq_laneq_s32( - a: int32x4_t, - b: int8x16_t, - c: int8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_s32(a, b, transmute(c)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(udot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdot_laneq_u32( - a: uint32x2_t, - b: uint8x8_t, - c: uint8x16_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_u32(a, b, transmute(c)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { + transmute(a) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(udot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdotq_laneq_u32( - a: uint32x4_t, - b: uint8x16_t, - c: uint8x16_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_u32(a, b, transmute(c)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { - static_assert!(N == 0); - a +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { - static_assert!(N == 0); - a +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { - static_assert_uimm_bits!(N, 4); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { - static_assert_uimm_bits!(N, 4); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { - static_assert_uimm_bits!(N, 4); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { - static_assert!(N == 0); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { - static_assert!(N == 0); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_lane_u64(a: uint64x1_t) -> u64 { - static_assert!(N == 0); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 0))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 0))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { + transmute(a) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { + transmute(a) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v16i8" - )] - fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - _veor3q_s8(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v8i16" - )] - fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _veor3q_s16(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { + transmute(a) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v4i32" - )] - fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _veor3q_s32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v2i64" - )] - fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _veor3q_s64(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { + transmute(a) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v16i8" - )] - fn _veor3q_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v8i16" - )] - fn _veor3q_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { + transmute(a) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v4i32" - )] - fn _veor3q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v2i64" - )] - fn _veor3q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { + transmute(a) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ext, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ext, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmadd))] -pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.v1f64" - )] - fn _vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; - } - _vfma_f64(b, c, a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, -) -> float64x1_t { - static_assert!(LANE == 0); - vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, -) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmadd))] -pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { - vfma_f64(a, b, vdup_n_f64(c)) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f64" - )] - fn _vfmad_lane_f64(a: f64, b: f64, c: f64) -> f64; - } - static_assert!(LANE == 0); - let c: f64 = simd_extract!(c, LANE as u32); - _vfmad_lane_f64(b, c, a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmla))] -pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.v2f64" - )] - fn _vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - _vfmaq_f64(b, c, a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x1_t, -) -> float64x2_t { - static_assert!(LANE == 0); - vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmla))] -pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { - vfmaq_f64(a, b, vdupq_n_f64(c)) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f32" - )] - fn _vfmas_lane_f32(a: f32, b: f32, c: f32) -> f32; - } - static_assert_uimm_bits!(LANE, 1); - let c: f32 = simd_extract!(c, LANE as u32); - _vfmas_lane_f32(b, c, a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f32" - )] - fn _vfmas_laneq_f32(a: f32, b: f32, c: f32) -> f32; - } - static_assert_uimm_bits!(LANE, 2); - let c: f32 = simd_extract!(c, LANE as u32); - _vfmas_laneq_f32(b, c, a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f64" - )] - fn _vfmad_laneq_f64(a: f64, b: f64, c: f64) -> f64; - } - static_assert_uimm_bits!(LANE, 1); - let c: f64 = simd_extract!(c, LANE as u32); - _vfmad_laneq_f64(b, c, a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - let b: float64x1_t = simd_neg(b); - vfma_f64(a, b, c) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, -) -> float64x1_t { - static_assert!(LANE == 0); - vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, -) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { - vfms_f64(a, b, vdup_n_f64(c)) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - let b: float64x2_t = simd_neg(b); - vfmaq_f64(a, b, c) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x1_t, -) -> float64x2_t { - static_assert!(LANE == 0); - vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { - vfmsq_f64(a, b, vdupq_n_f64(c)) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - vfmas_lane_f32::(a, -b, c) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - vfmas_laneq_f32::(a, -b, c) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { - vfmad_lane_f64::(a, -b, c) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { + transmute(a) } -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - vfmad_laneq_f64::(a, -b, c) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { + transmute(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64" - )] - fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t; - } - _vld1_f64_x2(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64" - )] - fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t; - } - _vld1_f64_x3(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { + transmute(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64" - )] - fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t; - } - _vld1_f64_x4(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" - )] - fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t; - } - _vld1q_f64_x2(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { + transmute(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" - )] - fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t; - } - _vld1q_f64_x3(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" - )] - fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t; - } - _vld1q_f64_x4(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { + transmute(a) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64" - )] - fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t; - } - _vld2_dup_f64(a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" - )] - fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t; - } - _vld2q_dup_f64(a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { + transmute(a) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" - )] - fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t; - } - _vld2q_dup_s64(a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64" - )] - fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t; - } - _vld2_f64(a as _) +pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8" - )] - fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t; - } - _vld2_lane_f64(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8" - )] - fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t; - } - _vld2_lane_s64(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t { - static_assert!(LANE == 0); - transmute(vld2_lane_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t { - static_assert!(LANE == 0); - transmute(vld2_lane_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { + transmute(a) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { - transmute(vld2q_dup_s64(transmute(a))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { + transmute(a) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { - transmute(vld2q_dup_s64(transmute(a))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" - )] - fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t; - } - _vld2q_f64(a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" - )] - fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t; - } - _vld2q_s64(a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" - )] - fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) - -> float64x2x2_t; - } - _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" - )] - fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t; - } - _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" - )] - fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t; - } - _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld2q_lane_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld2q_lane_s8::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld2q_lane_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v2f32" + )] + fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd32x_f32(a) +} + +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v2f32" + )] + fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrnd32x_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] + +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld2q_lane_s8::(transmute(a), transmute(b))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v4f32" + )] + fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t; + } + _vrnd32xq_f32(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] + +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { - transmute(vld2q_s64(transmute(a))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v4f32" + )] + fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrnd32xq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] + +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { - transmute(vld2q_s64(transmute(a))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v2f64" + )] + fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t; + } + _vrnd32xq_f64(a) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"] + +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64" + link_name = "llvm.aarch64.neon.frint32x.v2f64" )] - fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t; + fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t; } - _vld3_dup_f64(a as _) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrnd32xq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] + +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" + link_name = "llvm.aarch64.frint32x.f64" )] - fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t; + fn _vrnd32x_f64(a: f64) -> f64; } - _vld3q_dup_f64(a as _) + transmute(_vrnd32x_f64(simd_extract!(a, 0))) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] + +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" + link_name = "llvm.aarch64.neon.frint32z.v2f32" )] - fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t; + fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t; } - _vld3q_dup_s64(a as _) + _vrnd32z_f32(a) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"] + +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64" + link_name = "llvm.aarch64.neon.frint32z.v2f32" )] - fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t; + fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t; } - _vld3_f64(a as _) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrnd32z_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"] + +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8" + link_name = "llvm.aarch64.neon.frint32z.v4f32" )] - fn _vld3_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - n: i64, - ptr: *const i8, - ) -> float64x1x3_t; + fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t; } - _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) + _vrnd32zq_f32(a) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"] + +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t { - static_assert!(LANE == 0); - transmute(vld3_lane_s64::(transmute(a), transmute(b))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32z.v4f32" + )] + fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrnd32zq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"] + +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8" + link_name = "llvm.aarch64.neon.frint32z.v2f64" )] - fn _vld3_lane_s64( - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - n: i64, - ptr: *const i8, - ) -> int64x1x3_t; + fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t; } - _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) + _vrnd32zq_f64(a) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"] + +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t { - static_assert!(LANE == 0); - transmute(vld3_lane_s64::(transmute(a), transmute(b))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32z.v2f64" + )] + fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrnd32zq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] + +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { - transmute(vld3q_dup_s64(transmute(a))) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.frint32z.f64" + )] + fn _vrnd32z_f64(a: f64) -> f64; + } + transmute(_vrnd32z_f64(simd_extract!(a, 0))) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] + +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { - transmute(vld3q_dup_s64(transmute(a))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64x.v2f32" + )] + fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd64x_f32(a) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] + +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" + link_name = "llvm.aarch64.neon.frint64x.v2f32" )] - fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t; + fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t; } - _vld3q_f64(a as _) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrnd64x_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] + +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" + link_name = "llvm.aarch64.neon.frint64x.v4f32" )] - fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t; + fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t; } - _vld3q_s64(a as _) + _vrnd64xq_f32(a) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] + +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" + link_name = "llvm.aarch64.neon.frint64x.v4f32" )] - fn _vld3q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - n: i64, - ptr: *const i8, - ) -> float64x2x3_t; + fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t; } - _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrnd64xq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] + +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3q_lane_s64::(transmute(a), transmute(b))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64x.v2f64" + )] + fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t; + } + _vrnd64xq_f64(a) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] + +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" + link_name = "llvm.aarch64.neon.frint64x.v2f64" )] - fn _vld3q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - n: i64, - ptr: *const i8, - ) -> int8x16x3_t; + fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t; } - _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrnd64xq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] + +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" + link_name = "llvm.aarch64.frint64x.f64" )] - fn _vld3q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - n: i64, - ptr: *const i8, - ) -> int64x2x3_t; + fn _vrnd64x_f64(a: f64) -> f64; } - _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) + transmute(_vrnd64x_f64(simd_extract!(a, 0))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] + +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld3q_lane_s8::(transmute(a), transmute(b))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v2f32" + )] + fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd64z_f32(a) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] + +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3q_lane_s64::(transmute(a), transmute(b))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v2f32" + )] + fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrnd64z_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] + +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld3q_lane_s8::(transmute(a), transmute(b))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v4f32" + )] + fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t; + } + _vrnd64zq_f32(a) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] + +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { - transmute(vld3q_s64(transmute(a))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v4f32" + )] + fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrnd64zq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] + +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { - transmute(vld3q_s64(transmute(a))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v2f64" + )] + fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t; + } + _vrnd64zq_f64(a) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"] + +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64" + link_name = "llvm.aarch64.neon.frint64z.v2f64" )] - fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t; + fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t; } - _vld4_dup_f64(a as _) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrnd64zq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] + +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" + link_name = "llvm.aarch64.frint64z.f64" )] - fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t; + fn _vrnd64z_f64(a: f64) -> f64; } - _vld4q_dup_f64(a as _) + transmute(_vrnd64z_f64(simd_extract!(a, 0))) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] + +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" + link_name = "llvm.trunc.v2f32" )] - fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t; + fn _vrnd_f32(a: float32x2_t) -> float32x2_t; } - _vld4q_dup_s64(a as _) + _vrnd_f32(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"] + +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64" + link_name = "llvm.trunc.v2f32" )] - fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t; + fn _vrnd_f32(a: float32x2_t) -> float32x2_t; } - _vld4_f64(a as _) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrnd_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"] + +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8" + link_name = "llvm.trunc.v4f32" )] - fn _vld4_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - d: float64x1_t, - n: i64, - ptr: *const i8, - ) -> float64x1x4_t; + fn _vrndq_f32(a: float32x4_t) -> float32x4_t; } - _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vrndq_f32(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"] + +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8" + link_name = "llvm.trunc.v4f32" )] - fn _vld4_lane_s64( - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - d: int64x1_t, - n: i64, - ptr: *const i8, - ) -> int64x1x4_t; + fn _vrndq_f32(a: float32x4_t) -> float32x4_t; } - _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t { - static_assert!(LANE == 0); - transmute(vld4_lane_s64::(transmute(a), transmute(b))) + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrndq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"] + +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t { - static_assert!(LANE == 0); - transmute(vld4_lane_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.trunc.v1f64" + )] + fn _vrnd_f64(a: float64x1_t) -> float64x1_t; + } + _vrnd_f64(a) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] + +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4r))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { - transmute(vld4q_dup_s64(transmute(a))) +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.trunc.v2f64" + )] + fn _vrndq_f64(a: float64x2_t) -> float64x2_t; + } + _vrndq_f64(a) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] + +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { - transmute(vld4q_dup_s64(transmute(a))) +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.trunc.v2f64" + )] + fn _vrndq_f64(a: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrndq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] + +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" + link_name = "llvm.round.v2f32" )] - fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t; + fn _vrnda_f32(a: float32x2_t) -> float32x2_t; } - _vld4q_f64(a as _) + _vrnda_f32(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] + +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" + link_name = "llvm.round.v2f32" )] - fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t; + fn _vrnda_f32(a: float32x2_t) -> float32x2_t; } - _vld4q_s64(a as _) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrnda_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] + +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" - )] - fn _vld4q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - n: i64, - ptr: *const i8, - ) -> float64x2x4_t; + link_name = "llvm.round.v4f32" + )] + fn _vrndaq_f32(a: float32x4_t) -> float32x4_t; } - _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vrndaq_f32(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] + +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" + link_name = "llvm.round.v4f32" )] - fn _vld4q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - n: i64, - ptr: *const i8, - ) -> int8x16x4_t; + fn _vrndaq_f32(a: float32x4_t) -> float32x4_t; } - _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrndaq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] + +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" + link_name = "llvm.round.v1f64" )] - fn _vld4q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - d: int64x2_t, - n: i64, - ptr: *const i8, - ) -> int64x2x4_t; + fn _vrnda_f64(a: float64x1_t) -> float64x1_t; } - _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vrnda_f64(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] + +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4q_lane_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.round.v2f64" + )] + fn _vrndaq_f64(a: float64x2_t) -> float64x2_t; + } + _vrndaq_f64(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] + +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld4q_lane_s8::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.round.v2f64" + )] + fn _vrndaq_f64(a: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrndaq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] + +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4q_lane_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.nearbyint.v2f32" + )] + fn _vrndi_f32(a: float32x2_t) -> float32x2_t; + } + _vrndi_f32(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] + +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld4q_lane_s8::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.nearbyint.v2f32" + )] + fn _vrndi_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrndi_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] + +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { - transmute(vld4q_s64(transmute(a))) +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.nearbyint.v4f32" + )] + fn _vrndiq_f32(a: float32x4_t) -> float32x4_t; + } + _vrndiq_f32(a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] + +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { - transmute(vld4q_s64(transmute(a))) +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.nearbyint.v4f32" + )] + fn _vrndiq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrndiq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"] + +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmax))] -pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v1f64" + link_name = "llvm.nearbyint.v1f64" )] - fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + fn _vrndi_f64(a: float64x1_t) -> float64x1_t; } - _vmax_f64(a, b) + _vrndi_f64(a) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] + +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmax))] -pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f64" + link_name = "llvm.nearbyint.v2f64" )] - fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vrndiq_f64(a: float64x2_t) -> float64x2_t; } - _vmaxq_f64(a, b) + _vrndiq_f64(a) } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"] + +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnm))] -pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v1f64" + link_name = "llvm.nearbyint.v2f64" )] - fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + fn _vrndiq_f64(a: float64x2_t) -> float64x2_t; } - _vmaxnm_f64(a, b) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrndiq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] + +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnm))] -pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f64" + link_name = "llvm.floor.v2f32" )] - fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vrndm_f32(a: float32x2_t) -> float32x2_t; } - _vmaxnmq_f64(a, b) + _vrndm_f32(a) } -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] + +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" + link_name = "llvm.floor.v2f32" )] - fn _vmaxnmv_f32(a: float32x2_t) -> f32; + fn _vrndm_f32(a: float32x2_t) -> float32x2_t; } - _vmaxnmv_f32(a) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrndm_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] + +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" + link_name = "llvm.floor.v4f32" )] - fn _vmaxnmvq_f64(a: float64x2_t) -> f64; + fn _vrndmq_f32(a: float32x4_t) -> float32x4_t; } - _vmaxnmvq_f64(a) + _vrndmq_f32(a) } -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] + +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmv))] -pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" + link_name = "llvm.floor.v4f32" )] - fn _vmaxnmvq_f32(a: float32x4_t) -> f32; + fn _vrndmq_f32(a: float32x4_t) -> float32x4_t; } - _vmaxnmvq_f32(a) + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrndmq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] + +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmin))] -pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v1f64" + link_name = "llvm.floor.v1f64" )] - fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + fn _vrndm_f64(a: float64x1_t) -> float64x1_t; } - _vmin_f64(a, b) + _vrndm_f64(a) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] + +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmin))] -pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f64" + link_name = "llvm.floor.v2f64" )] - fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vrndmq_f64(a: float64x2_t) -> float64x2_t; } - _vminq_f64(a, b) + _vrndmq_f64(a) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"] + +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminnm))] -pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v1f64" + link_name = "llvm.floor.v2f64" )] - fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + fn _vrndmq_f64(a: float64x2_t) -> float64x2_t; } - _vminnm_f64(a, b) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrndmq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminnm))] -pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintn))] +pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f64" + link_name = "llvm.aarch64.neon.frintn.v1f64" )] - fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vrndn_f64(a: float64x1_t) -> float64x1_t; } - _vminnmq_f64(a, b) + _vrndn_f64(a) } -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintn))] +pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" + link_name = "llvm.aarch64.neon.frintn.v2f64" )] - fn _vminnmv_f32(a: float32x2_t) -> f32; + fn _vrndnq_f64(a: float64x2_t) -> float64x2_t; } - _vminnmv_f32(a) + _vrndnq_f64(a) } -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintn))] +pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" + link_name = "llvm.aarch64.neon.frintn.v2f64" )] - fn _vminnmvq_f64(a: float64x2_t) -> f64; + fn _vrndnq_f64(a: float64x2_t) -> float64x2_t; } - _vminnmvq_f64(a) + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrndnq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmv))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(frintn))] +pub unsafe fn vrndns_f32(a: f32) -> f32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" + link_name = "llvm.roundeven.f32" )] - fn _vminnmvq_f32(a: float32x4_t) -> f32; + fn _vrndns_f32(a: f32) -> f32; } - _vminnmvq_f32(a) + _vrndns_f32(a) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] + +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - simd_add(a, simd_mul(b, c)) +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ceil.v2f32" + )] + fn _vrndp_f32(a: float32x2_t) -> float32x2_t; + } + _vrndp_f32(a) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] + +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - simd_add(a, simd_mul(b, c)) +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ceil.v2f32" + )] + fn _vrndp_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrndp_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] + +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ceil.v4f32" + )] + fn _vrndpq_f32(a: float32x4_t) -> float32x4_t; + } + _vrndpq_f32(a) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] + +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ceil.v4f32" + )] + fn _vrndpq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrndpq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] + +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ceil.v1f64" + )] + fn _vrndp_f64(a: float64x1_t) -> float64x1_t; + } + _vrndp_f64(a) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] + +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ceil.v2f64" + )] + fn _vrndpq_f64(a: float64x2_t) -> float64x2_t; + } + _vrndpq_f64(a) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] + +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ceil.v2f64" + )] + fn _vrndpq_f64(a: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrndpq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] + +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.rint.v2f32" + )] + fn _vrndx_f32(a: float32x2_t) -> float32x2_t; + } + _vrndx_f32(a) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] + +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.rint.v2f32" + )] + fn _vrndx_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrndx_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] + +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.rint.v4f32" + )] + fn _vrndxq_f32(a: float32x4_t) -> float32x4_t; + } + _vrndxq_f32(a) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] + +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vmlal_high_s16(a, b, vdupq_n_s16(c)) +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.rint.v4f32" + )] + fn _vrndxq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrndxq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] + +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vmlal_high_s32(a, b, vdupq_n_s32(c)) +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.rint.v1f64" + )] + fn _vrndx_f64(a: float64x1_t) -> float64x1_t; + } + _vrndx_f64(a) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] + +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { - vmlal_high_u16(a, b, vdupq_n_u16(c)) +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.rint.v2f64" + )] + fn _vrndxq_f64(a: float64x2_t) -> float64x2_t; + } + _vrndxq_f64(a) } -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] + +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { - vmlal_high_u32(a, b, vdupq_n_u32(c)) +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.rint.v2f64" + )] + fn _vrndxq_f64(a: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrndxq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] +#[cfg_attr(test, assert_instr(srshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlal_s8(a, b, c) +pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.i64" + )] + fn _vrshld_s64(a: i64, b: i64) -> i64; + } + _vrshld_s64(a, b) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] +#[cfg_attr(test, assert_instr(urshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlal_s16(a, b, c) +pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.i64" + )] + fn _vrshld_u64(a: i64, b: i64) -> i64; + } + _vrshld_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] +#[cfg_attr(test, assert_instr(srshr, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlal_s32(a, b, c) +pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + vrshld_s64(a, -N as i64) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] +#[cfg_attr(test, assert_instr(urshr, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlal_u8(a, b, c) +pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + vrshld_u64(a, -N as i64) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlal_u16(a, b, c) +pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vrshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlal_u32(a, b, c) +pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x16_t = simd_shuffle!( + a, + vrshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_high_s32( +pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + vrshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_s32( +pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + vrshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vmlsl_high_s16(a, b, vdupq_n_s16(c)) +pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v1f64" + )] + fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t; + } + _vrsqrte_f64(a) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vmlsl_high_s32(a, b, vdupq_n_s32(c)) +pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v2f64" + )] + fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; + } + _vrsqrteq_f64(a) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { - vmlsl_high_u16(a, b, vdupq_n_u16(c)) +pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v2f64" + )] + fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = _vrsqrteq_f64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { - vmlsl_high_u32(a, b, vdupq_n_u32(c)) +pub unsafe fn vrsqrted_f64(a: f64) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.f64" + )] + fn _vrsqrted_f64(a: f64) -> f64; + } + _vrsqrted_f64(a) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlsl_s8(a, b, c) +pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.f32" + )] + fn _vrsqrtes_f32(a: f32) -> f32; + } + _vrsqrtes_f32(a) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlsl_s16(a, b, c) +pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v1f64" + )] + fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vrsqrts_f64(a, b) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlsl_s32(a, b, c) +pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v2f64" + )] + fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vrsqrtsq_f64(a, b) } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlsl_u8(a, b, c) +pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v2f64" + )] + fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = _vrsqrtsq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlsl_u16(a, b, c) +pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.f64" + )] + fn _vrsqrtsd_f64(a: f64, b: f64) -> f64; + } + _vrsqrtsd_f64(a, b) } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlsl_u32(a, b, c) +pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.f32" + )] + fn _vrsqrtss_f32(a: f32, b: f32) -> f32; + } + _vrsqrtss_f32(a, b) } -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] + +#[doc = "Signed rounding shift right and accumulate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(srshr, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vmovl_s8(a) +pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + let b: i64 = vrshrd_n_s64::(b); + a.wrapping_add(b) } -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] + +#[doc = "Unsigned rounding shift right and accumulate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(urshr, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vmovl_s16(a) +pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + let b: u64 = vrshrd_n_u64::(b); + a.wrapping_add(b) } -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - vmovl_s32(a) +pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let x: int8x8_t = vrsubhn_s16(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vmovl_u8(a) +pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let x: int8x8_t = vrsubhn_s16(b, c); + let ret_val: int8x16_t = + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vmovl_u16(a) +pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let x: int16x4_t = vrsubhn_s32(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - vmovl_u32(a) +pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let x: int16x4_t = vrsubhn_s32(b, c); + let ret_val: int16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - let c: int8x8_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let x: int32x2_t = vrsubhn_s64(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3]) } -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - let c: int16x4_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); + let x: int32x2_t = vrsubhn_s64(b, c); + let ret_val: int32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - let c: int32x2_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3]) +pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let x: uint8x8_t = vrsubhn_u16(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - let c: uint8x8_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let x: uint8x8_t = vrsubhn_u16(b, c); + let ret_val: uint8x16_t = + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - let c: uint16x4_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let x: uint16x4_t = vrsubhn_u32(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - let c: uint32x2_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3]) +pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let x: uint16x4_t = vrsubhn_u32(b, c); + let ret_val: uint16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmul))] -pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - simd_mul(a, b) +pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let x: uint32x2_t = vrsubhn_u64(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3]) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmul))] -pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_mul(a, b) +pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let x: uint32x2_t = vrsubhn_u64(b, c); + let ret_val: uint32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { +pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t { static_assert!(LANE == 0); - simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) + simd_insert!(b, LANE as u32, a) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { +pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { static_assert_uimm_bits!(LANE, 1); - simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) + simd_insert!(b, LANE as u32, a) } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { - simd_mul(a, vdup_n_f64(b)) +pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] + +#[doc = "SHA512 hash update part 2"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { - simd_mul(a, vdupq_n_f64(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512h2))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512h2" + )] + fn _vsha512h2q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"] + +#[doc = "SHA512 hash update part 2"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { - static_assert!(LANE == 0); - let b: f64 = simd_extract!(b, LANE as u32); - a * b +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512h2))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512h2" + )] + fn _vsha512h2q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = + _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] + +#[doc = "SHA512 hash update part 1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_s16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512h))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512h" + )] + fn _vsha512hq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] + +#[doc = "SHA512 hash update part 1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_high_s16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512h))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512h" + )] + fn _vsha512hq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = + _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] + +#[doc = "SHA512 schedule update 0"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_high_s32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512su0))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512su0" + )] + fn _vsha512su0q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] + +#[doc = "SHA512 schedule update 0"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_s32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512su0))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512su0" + )] + fn _vsha512su0q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] + +#[doc = "SHA512 schedule update 1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_u16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512su1))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512su1" + )] + fn _vsha512su1q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] + +#[doc = "SHA512 schedule update 1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_high_u16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512su1))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha512su1" + )] + fn _vsha512su1q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = + _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(sshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_high_u32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { + transmute(vshl_s64(transmute(a), transmute(b))) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(ushl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_u32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { + transmute(vshl_u64(transmute(a), transmute(b))) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2))] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { - vmull_high_s16(a, vdupq_n_s16(b)) +pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { + static_assert!(N >= 0 && N <= 8); + let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vshll_n_s8::(b) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2))] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { - vmull_high_s32(a, vdupq_n_s32(b)) +pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { + static_assert!(N >= 0 && N <= 8); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = vshll_n_s8::(b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2))] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { - vmull_high_u16(a, vdupq_n_u16(b)) +pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 16); + let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vshll_n_s16::(b) } -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2))] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { - vmull_high_u32(a, vdupq_n_u32(b)) +pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 16); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let ret_val: int32x4_t = vshll_n_s16::(b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { - vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) +pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 32); + let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); + vshll_n_s32::(b) } -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - vmull_p8(a, b) +pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 32); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let ret_val: int64x2_t = vshll_n_s32::(b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - vmull_s8(a, b) +pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { + static_assert!(N >= 0 && N <= 8); + let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vshll_n_u8::(b) } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - vmull_s16(a, b) +pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { + static_assert!(N >= 0 && N <= 8); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = vshll_n_u8::(b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - vmull_s32(a, b) +pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 16); + let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vshll_n_u16::(b) } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - vmull_u8(a, b) +pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 16); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let ret_val: uint32x4_t = vshll_n_u16::(b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - vmull_u16(a, b) +pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 32); + let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + vshll_n_u32::(b) } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - vmull_u32(a, b) +pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 32); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let ret_val: uint64x2_t = vshll_n_u32::(b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull64" - )] - fn _vmull_p64(a: p64, b: p64) -> int8x16_t; - } - transmute(_vmull_p64(a, b)) +pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE == 0); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x16_t = simd_shuffle!( + a, + vshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] +pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_uimm_bits!(LANE, 1); - let b: f32 = simd_extract!(b, LANE as u32); - a * b +pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_uimm_bits!(LANE, 2); - let b: f32 = simd_extract!(b, LANE as u32); - a * b +pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_uimm_bits!(LANE, 1); - let b: f64 = simd_extract!(b, LANE as u32); - a * b +pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v2f32" - )] - fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmulx_f32(a, b) +pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v4f32" - )] - fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vmulxq_f32(a, b) +pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + vshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v1f64" - )] - fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - _vmulx_f64(a, b) +pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v2f64" - )] - fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vmulxq_f64(a, b) +pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmulxq_f32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v8i8" + )] + fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; + } + _vsli_n_s8(a, b, N) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmulxq_f32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v8i8" + )] + fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vsli_n_s8(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v16i8" + )] + fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; + } + _vsliq_n_s8(a, b, N) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - static_assert!(LANE == 0); - vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) +pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v16i8" + )] + fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vsliq_n_s8(a, b, N); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) +pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v4i16" + )] + fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; + } + _vsli_n_s16(a, b, N) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { - unsafe extern "unadjusted" { +pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.f64" + link_name = "llvm.aarch64.neon.vsli.v4i16" )] - fn _vmulxd_f64(a: f64, b: f64) -> f64; + fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; } - _vmulxd_f64(a, b) + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vsli_n_s16(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { - unsafe extern "unadjusted" { +pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.f32" + link_name = "llvm.aarch64.neon.vsli.v8i16" )] - fn _vmulxs_f32(a: f32, b: f32) -> f32; + fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; } - _vmulxs_f32(a, b) + _vsliq_n_s16(a, b, N) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { - static_assert!(LANE == 0); - vmulxd_f64(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v8i16" + )] + fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vsliq_n_s16(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_uimm_bits!(LANE, 1); - vmulxd_f64(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 0 && N <= 31); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v2i32" + )] + fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; + } + _vsli_n_s32(a, b, N) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_uimm_bits!(LANE, 1); - vmulxs_f32(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 0 && N <= 31); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v2i32" + )] + fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vsli_n_s32(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_uimm_bits!(LANE, 2); - vmulxs_f32(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 31); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v4i32" + )] + fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; + } + _vsliq_n_s32(a, b, N) } -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE == 0); - vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 31); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v4i32" + )] + fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vsliq_n_s32(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fneg))] -pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { - simd_neg(a) +pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 0 && N <= 63); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v1i64" + )] + fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; + } + _vsli_n_s64(a, b, N) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fneg))] -pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { - simd_neg(a) +pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 63); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v2i64" + )] + fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + } + _vsliq_n_s64(a, b, N) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(neg))] -pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { - simd_neg(a) +pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 63); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsli.v2i64" + )] + fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vsliq_n_s64(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(neg))] -pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { - simd_neg(a) +pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vsli_n_s8::(transmute(a), transmute(b))) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(neg))] -pub unsafe fn vnegd_s64(a: i64) -> i64 { - a.wrapping_neg() +pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = transmute(vsli_n_s8::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { - let a1: f64 = simd_extract!(a, 0); - let a2: f64 = simd_extract!(a, 1); - a1 + a2 +pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vsliq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { - let a1: f32 = simd_extract!(a, 0); - let a2: f32 = simd_extract!(a, 1); - a1 + a2 +pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = transmute(vsliq_n_s8::(transmute(a), transmute(b))); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v4f32" - )] - fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpaddq_f32(a, b) +pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vsli_n_s16::(transmute(a), transmute(b))) } -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f64" - )] - fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpaddq_f64(a, b) +pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = transmute(vsli_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" - )] - fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpmaxnm_f32(a, b) +pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vsliq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" - )] - fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpmaxnmq_f32(a, b) +pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = transmute(vsliq_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" - )] - fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpmaxnmq_f64(a, b) +pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 0 && N <= 31); + transmute(vsli_n_s32::(transmute(a), transmute(b))) } -#[doc = "Floating-point maximum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" - )] - fn _vpmaxnmqd_f64(a: float64x2_t) -> f64; - } - _vpmaxnmqd_f64(a) +pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 0 && N <= 31); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = transmute(vsli_n_s32::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point maximum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" - )] - fn _vpmaxnms_f32(a: float32x2_t) -> f32; - } - _vpmaxnms_f32(a) +pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 31); + transmute(vsliq_n_s32::(transmute(a), transmute(b))) } -#[doc = "Floating-point maximum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" - )] - fn _vpmaxqd_f64(a: float64x2_t) -> f64; - } - _vpmaxqd_f64(a) +pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 31); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = transmute(vsliq_n_s32::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point maximum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" - )] - fn _vpmaxs_f32(a: float32x2_t) -> f32; - } - _vpmaxs_f32(a) +pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_s64::(transmute(a), transmute(b))) } -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v2f32" - )] - fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpminnm_f32(a, b) +pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsliq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v4f32" - )] - fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpminnmq_f32(a, b) +pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 63); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = transmute(vsliq_n_s64::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v2f64" - )] - fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpminnmq_f64(a, b) +pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vsli_n_s8::(transmute(a), transmute(b))) } -#[doc = "Floating-point minimum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" - )] - fn _vpminnmqd_f64(a: float64x2_t) -> f64; - } - _vpminnmqd_f64(a) +pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = transmute(vsli_n_s8::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point minimum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" - )] - fn _vpminnms_f32(a: float32x2_t) -> f32; - } - _vpminnms_f32(a) +pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vsliq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Floating-point minimum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f64.v2f64" - )] - fn _vpminqd_f64(a: float64x2_t) -> f64; - } - _vpminqd_f64(a) +pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = transmute(vsliq_n_s8::(transmute(a), transmute(b))); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point minimum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f32.v2f32" - )] - fn _vpmins_f32(a: float32x2_t) -> f32; - } - _vpmins_f32(a) +pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vsli_n_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v1i64" - )] - fn _vqabs_s64(a: int64x1_t) -> int64x1_t; - } - _vqabs_s64(a) +pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 4); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = transmute(vsli_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v2i64" - )] - fn _vqabsq_s64(a: int64x2_t) -> int64x2_t; - } - _vqabsq_s64(a) +pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vsliq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsb_s8(a: i8) -> i8 { - simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) +pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 4); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = transmute(vsliq_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsh_s16(a: i16) -> i16 { - simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) +pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabss_s32(a: i32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.i32" - )] - fn _vqabss_s32(a: i32) -> i32; - } - _vqabss_s32(a) +pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsliq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"] + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsd_s64(a: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.i64" - )] - fn _vqabsd_s64(a: i64) -> i64; - } - _vqabsd_s64(a) +pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(N >= 0 && N <= 63); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = transmute(vsliq_n_s64::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"] + +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 { - let a: int8x8_t = vdup_n_s8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqadd_s8(a, b), 0) +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] +pub unsafe fn vslid_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_s64::(transmute(a), transmute(b))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"] + +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqadd_s16(a, b), 0) +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] +pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_u64::(transmute(a), transmute(b))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"] + +#[doc = "SM3PARTW1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 { - let a: uint8x8_t = vdup_n_u8(a); - let b: uint8x8_t = vdup_n_u8(b); - simd_extract!(vqadd_u8(a, b), 0) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3partw1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3partw1" + )] + fn _vsm3partw1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"] + +#[doc = "SM3PARTW1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { - let a: uint16x4_t = vdup_n_u16(a); - let b: uint16x4_t = vdup_n_u16(b); - simd_extract!(vqadd_u16(a, b), 0) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3partw1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3partw1" + )] + fn _vsm3partw1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"] + +#[doc = "SM3PARTW2"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3partw2))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.i32" + link_name = "llvm.aarch64.crypto.sm3partw2" )] - fn _vqadds_s32(a: i32, b: i32) -> i32; + fn _vsm3partw2q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - _vqadds_s32(a, b) + _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"] + +#[doc = "SM3PARTW2"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3partw2))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.i64" + link_name = "llvm.aarch64.crypto.sm3partw2" )] - fn _vqaddd_s64(a: i64, b: i64) -> i64; + fn _vsm3partw2q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - _vqaddd_s64(a, b) + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"] + +#[doc = "SM3SS1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3ss1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.i32" + link_name = "llvm.aarch64.crypto.sm3ss1" )] - fn _vqadds_u32(a: i32, b: i32) -> i32; + fn _vsm3ss1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - _vqadds_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"] + +#[doc = "SM3SS1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3ss1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.i64" + link_name = "llvm.aarch64.crypto.sm3ss1" )] - fn _vqaddd_u64(a: i64, b: i64) -> i64; + fn _vsm3ss1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - _vqaddd_u64(a.as_signed(), b.as_signed()).as_unsigned() + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] + +#[doc = "SM3TT1A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)) +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt1aq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1a" + )] + fn _vsm3tt1aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] + +#[doc = "SM3TT1A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)) +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt1aq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1a" + )] + fn _vsm3tt1aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] + +#[doc = "SM3TT1B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)) +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt1bq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1b" + )] + fn _vsm3tt1bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] + +#[doc = "SM3TT1B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vqaddq_s32(a, vqdmull_high_n_s16(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - vqaddq_s32(a, vqdmull_high_s16(b, c)) +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt1bq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1b" + )] + fn _vsm3tt1bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] + +#[doc = "SM3TT2A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vqaddq_s64(a, vqdmull_high_n_s32(b, c)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt2aq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt2a" + )] + fn _vsm3tt2aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] + +#[doc = "SM3TT2A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - vqaddq_s64(a, vqdmull_high_s32(b, c)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt2aq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt2a" + )] + fn _vsm3tt2aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] + +#[doc = "SM3TT2B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, N = 2))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqaddq_s32(a, vqdmull_laneq_s16::(b, c)) +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt2bq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt2b" + )] + fn _vsm3tt2bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] + +#[doc = "SM3TT2B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, N = 1))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s64(a, vqdmull_laneq_s32::(b, c)) +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt2bq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt2b" + )] + fn _vsm3tt2bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] + +#[doc = "SM4 key"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4ekey))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4ekey" + )] + fn _vsm4ekeyq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] + +#[doc = "SM4 key"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_uimm_bits!(LANE, 3); - vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4ekey))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4ekey" + )] + fn _vsm4ekeyq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] + +#[doc = "SM4 encode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_uimm_bits!(LANE, 1); - vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4e))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4e" + )] + fn _vsm4eq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] + +#[doc = "SM4 encode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_uimm_bits!(LANE, 2); - vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4e))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4e" + )] + fn _vsm4eq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { - let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); - vqadds_s32(a, simd_extract!(x, 0)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v8i8" + )] + fn _vsqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vsqadd_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { - let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c)); - x as i64 +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v8i8" + )] + fn _vsqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vsqadd_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v16i8" + )] + fn _vsqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vsqaddq_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v16i8" + )] + fn _vsqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vsqaddq_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v4i16" + )] + fn _vsqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vsqadd_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v4i16" + )] + fn _vsqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vsqadd_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vqsubq_s32(a, vqdmull_high_n_s16(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v8i16" + )] + fn _vsqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vsqaddq_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - vqsubq_s32(a, vqdmull_high_s16(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v8i16" + )] + fn _vsqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vsqaddq_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vqsubq_s64(a, vqdmull_high_n_s32(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v2i32" + )] + fn _vsqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vsqadd_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - vqsubq_s64(a, vqdmull_high_s32(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v2i32" + )] + fn _vsqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vsqadd_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqsubq_s32(a, vqdmull_laneq_s16::(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v4i32" + )] + fn _vsqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vsqaddq_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s64(a, vqdmull_laneq_s32::(b, c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v4i32" + )] + fn _vsqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vsqaddq_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v1i64" + )] + fn _vsqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vsqadd_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_uimm_bits!(LANE, 3); - vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v2i64" + )] + fn _vsqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vsqaddq_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] + +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_uimm_bits!(LANE, 1); - vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v2i64" + )] + fn _vsqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vsqaddq_u64(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] + +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_uimm_bits!(LANE, 2); - vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { + simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"] + +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl))] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { - let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); - vqsubs_s32(a, simd_extract!(x, 0)) +pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { + simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"] + +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl))] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { - let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c)); - x as i64 +pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.i64" + )] + fn _vsqaddd_u64(a: i64, b: i64) -> i64; + } + _vsqaddd_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] + +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) +pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.i32" + )] + fn _vsqadds_u32(a: i32, b: i32) -> i32; + } + _vsqadds_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] + +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) +pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { + simd_fsqrt(a) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] + +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) +pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_fsqrt(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] + +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) +pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { + simd_fsqrt(a) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] + +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_uimm_bits!(N, 2); - let b: i16 = simd_extract!(b, N as u32); - vqdmulhh_s16(a, b) +pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_fsqrt(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] + +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_uimm_bits!(N, 3); - let b: i16 = simd_extract!(b, N as u32); - vqdmulhh_s16(a, b) +pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { + simd_fsqrt(a) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"] + +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh))] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqdmulh_s16(a, b), 0) +pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { + simd_fsqrt(a) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"] + +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh))] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { - let a: int32x2_t = vdup_n_s32(a); - let b: int32x2_t = vdup_n_s32(b); - simd_extract!(vqdmulh_s32(a, b), 0) +pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float64x2_t = simd_fsqrt(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { - static_assert_uimm_bits!(N, 1); - let b: i32 = simd_extract!(b, N as u32); - vqdmulhs_s32(a, b) +pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v8i8" + )] + fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; + } + _vsri_n_s8(a, b, N) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - let b: i32 = simd_extract!(b, N as u32); - vqdmulhs_s32(a, b) +pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v8i8" + )] + fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vsri_n_s8(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) +pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v16i8" + )] + fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; + } + _vsriq_n_s8(a, b, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) +pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v16i8" + )] + fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vsriq_n_s8(a, b, N); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 1))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) +pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v4i16" + )] + fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; + } + _vsri_n_s16(a, b, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 4))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) +pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v4i16" + )] + fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vsri_n_s16(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = vdup_n_s16(b); - vqdmull_s16(a, b) +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v8i16" + )] + fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; + } + _vsriq_n_s16(a, b, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = vdup_n_s32(b); - vqdmull_s32(a, b) +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v8i16" + )] + fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vsriq_n_s16(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - vqdmull_s16(a, b) +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v2i32" + )] + fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; + } + _vsri_n_s32(a, b, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - vqdmull_s32(a, b) +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v2i32" + )] + fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vsri_n_s32(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v4i32" + )] + fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; + } + _vsriq_n_s32(a, b, N) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v4i32" + )] + fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vsriq_n_s32(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - let b: i16 = simd_extract!(b, N as u32); - vqdmullh_s16(a, b) +pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v1i64" + )] + fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; + } + _vsri_n_s64(a, b, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { - static_assert_uimm_bits!(N, 2); - let b: i32 = simd_extract!(b, N as u32); - vqdmulls_s32(a, b) +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v2i64" + )] + fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + } + _vsriq_n_s64(a, b, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { - static_assert_uimm_bits!(N, 3); - let b: i16 = simd_extract!(b, N as u32); - vqdmullh_s16(a, b) +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v2i64" + )] + fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vsriq_n_s64(a, b, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqdmull_s16(a, b), 0) +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsri_n_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 1))] +#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { - static_assert_uimm_bits!(N, 1); - let b: i32 = simd_extract!(b, N as u32); - vqdmulls_s32(a, b) +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = transmute(vsri_n_s8::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulls.scalar" - )] - fn _vqdmulls_s32(a: i32, b: i32) -> i64; - } - _vqdmulls_s32(a, b) +pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsriq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { +pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = transmute(vsriq_n_s8::(transmute(a), transmute(b))); simd_shuffle!( - a, - vqmovn_s16(b), + ret_val, + ret_val, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) + ) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) +pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsri_n_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - simd_shuffle!( - a, - vqmovn_u16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = transmute(vsri_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsriq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) +pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = transmute(vsriq_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnd_s64(a: i64) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64" - )] - fn _vqmovnd_s64(a: i64) -> i32; - } - _vqmovnd_s64(a) +pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + transmute(vsri_n_s32::(transmute(a), transmute(b))) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnd_u64(a: u64) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64" - )] - fn _vqmovnd_u64(a: i64) -> i32; - } - _vqmovnd_u64(a.as_signed()).as_unsigned() +pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = transmute(vsri_n_s32::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnh_s16(a: i16) -> i8 { - simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) +pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + transmute(vsriq_n_s32::(transmute(a), transmute(b))) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovns_s32(a: i32) -> i16 { - simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) +pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = transmute(vsriq_n_s32::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnh_u16(a: u16) -> u8 { - simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) +pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_s64::(transmute(a), transmute(b))) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovns_u32(a: u32) -> u16 { - simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) +pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsriq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - simd_shuffle!( - a, - vqmovun_s16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = transmute(vsriq_n_s64::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsri_n_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) +pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = transmute(vsri_n_s8::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovunh_s16(a: i16) -> u8 { - simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) +pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsriq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovuns_s32(a: i32) -> u16 { - simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) +pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = transmute(vsriq_n_s8::(transmute(a), transmute(b))); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovund_s64(a: i64) -> u32 { - simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) +pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsri_n_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v1i64" - )] - fn _vqneg_s64(a: int64x1_t) -> int64x1_t; - } - _vqneg_s64(a) +pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = transmute(vsri_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v2i64" - )] - fn _vqnegq_s64(a: int64x2_t) -> int64x2_t; - } - _vqnegq_s64(a) +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsriq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegb_s8(a: i8) -> i8 { - simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = transmute(vsriq_n_s16::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegh_s16(a: i16) -> i16 { - simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) +pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegs_s32(a: i32) -> i32 { - simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) +pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsriq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"] + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegd_s64(a: i64) -> i64 { - simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) +pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = transmute(vsriq_n_s64::(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] + +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlah_s16(a, b, c) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] +pub unsafe fn vsrid_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] + +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlah_s32(a, b, c) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] +pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_u64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlah_s16(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlah_s32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlahq_s16(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlahq_s32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlahq_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlahq_s32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" - )] - fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - _vqrdmlah_s16(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" - )] - fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _vqrdmlahq_s16(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" - )] - fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - _vqrdmlah_s32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" - )] - fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vqrdmlahq_s32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + crate::ptr::write_unaligned(ptr.cast(), a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - let c: int16x4_t = vdup_n_s16(c); - simd_extract!(vqrdmlah_s16(a, b, c), 0) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 { - let a: int32x2_t = vdup_n_s32(a); - let b: int32x2_t = vdup_n_s32(b); - let c: int32x2_t = vdup_n_s32(c); - simd_extract!(vqrdmlah_s32(a, b, c), 0) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlsh_s16(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlsh_s32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlsh_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlsh_s32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlshq_s16(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlshq_s32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlshq_s16(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlshq_s32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" - )] - fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - _vqrdmlsh_s16(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" - )] - fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _vqrdmlshq_s16(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" - )] - fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - _vqrdmlsh_s32(a, b, c) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" - )] - fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vqrdmlshq_s32(a, b, c) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - let c: int16x4_t = vdup_n_s16(c); - simd_extract!(vqrdmlsh_s16(a, b, c), 0) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { - let a: int32x2_t = vdup_n_s32(a); - let b: int32x2_t = vdup_n_s32(b); - let c: int32x2_t = vdup_n_s32(c); - simd_extract!(vqrdmlsh_s32(a, b, c), 0) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { - simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) +pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { - simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) +pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 { - let a: int8x8_t = vdup_n_s8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqrshl_s8(a, b), 0) +pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqrshl_s16(a, b), 0) +pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 { - let a: uint8x8_t = vdup_n_u8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqrshl_u8(a, b), 0) +pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { - let a: uint16x4_t = vdup_n_u16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqrshl_u16(a, b), 0) +pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.i64" - )] - fn _vqrshld_s64(a: i64, b: i64) -> i64; - } - _vqrshld_s64(a, b) +pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.i32" - )] - fn _vqrshls_s32(a: i32, b: i32) -> i32; - } - _vqrshls_s32(a, b) +pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.i32" - )] - fn _vqrshls_u32(a: i32, b: i32) -> i32; - } - _vqrshls_u32(a.as_signed(), b).as_unsigned() +pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { - unsafe extern "unadjusted" { +pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.i64" + link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64" )] - fn _vqrshld_u64(a: i64, b: i64) -> i64; + fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64); } - _vqrshld_u64(a.as_signed(), b).as_unsigned() + _vst1_f64_x2(b.0, b.1, a) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqrshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" + )] + fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64); + } + _vst1q_f64_x2(b.0, b.1, a) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" + )] + fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64); + } + let mut b: float64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst1q_f64_x2(b.0, b.1, a) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]) +pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64" + )] + fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64); + } + _vst1_f64_x3(b.0, b.1, b.2, a) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqrshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" + )] + fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64); + } + _vst1q_f64_x3(b.0, b.1, b.2, a) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" + )] + fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64); + } + let mut b: float64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst1q_f64_x3(b.0, b.1, b.2, a) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]) +pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64" + )] + fn _vst1_f64_x4( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + d: float64x1_t, + ptr: *mut f64, + ); + } + _vst1_f64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { - static_assert!(N >= 1 && N <= 32); - let a: uint64x2_t = vdupq_n_u64(a); - simd_extract!(vqrshrn_n_u64::(a), 0) +pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" + )] + fn _vst1q_f64_x4( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + ptr: *mut f64, + ); + } + _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { - static_assert!(N >= 1 && N <= 8); - let a: uint16x8_t = vdupq_n_u16(a); - simd_extract!(vqrshrn_n_u16::(a), 0) +pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" + )] + fn _vst1q_f64_x4( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + ptr: *mut f64, + ); + } + let mut b: float64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { - static_assert!(N >= 1 && N <= 16); - let a: uint32x4_t = vdupq_n_u32(a); - simd_extract!(vqrshrn_n_u32::(a), 0) +pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { - static_assert!(N >= 1 && N <= 8); - let a: int16x8_t = vdupq_n_s16(a); - simd_extract!(vqrshrn_n_s16::(a), 0) +pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { - static_assert!(N >= 1 && N <= 16); - let a: int32x4_t = vdupq_n_s32(a); - simd_extract!(vqrshrn_n_s32::(a), 0) +pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { + static_assert_uimm_bits!(LANE, 1); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { - static_assert!(N >= 1 && N <= 32); - let a: int64x2_t = vdupq_n_s64(a); - simd_extract!(vqrshrn_n_s64::(a), 0) +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v1f64.p0i8" + )] + fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8); + } + _vst2_f64(b.0, b.1, a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqrshrun_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8" + )] + fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8); + } + _vst2_lane_f64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8" + )] + fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8); + } + _vst2_lane_s64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]) +pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { + static_assert!(LANE == 0); + vst2_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { - static_assert!(N >= 1 && N <= 32); - let a: int64x2_t = vdupq_n_s64(a); - simd_extract!(vqrshrun_n_s64::(a), 0) +pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { + static_assert!(LANE == 0); + vst2_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { - static_assert!(N >= 1 && N <= 8); - let a: int16x8_t = vdupq_n_s16(a); - simd_extract!(vqrshrun_n_s16::(a), 0) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" + )] + fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8); + } + _vst2q_f64(b.0, b.1, a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { - static_assert!(N >= 1 && N <= 16); - let a: int32x4_t = vdupq_n_s32(a); - simd_extract!(vqrshrun_n_s32::(a), 0) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" + )] + fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8); + } + let mut b: float64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2q_f64(b.0, b.1, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(vqshl_n_s8::(vdup_n_s8(a)), 0) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" + )] + fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8); + } + _vst2q_s64(b.0, b.1, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_n_s64(a: i64) -> i64 { - static_assert_uimm_bits!(N, 6); - simd_extract!(vqshl_n_s64::(vdup_n_s64(a)), 0) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" + )] + fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8); + } + let mut b: int64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2q_s64(b.0, b.1, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { - static_assert_uimm_bits!(N, 4); - simd_extract!(vqshl_n_s16::(vdup_n_s16(a)), 0) +pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" + )] + fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_n_s32(a: i32) -> i32 { - static_assert_uimm_bits!(N, 5); - simd_extract!(vqshl_n_s32::(vdup_n_s32(a)), 0) +pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" + )] + fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8); + } + let mut b: float64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(vqshl_n_u8::(vdup_n_u8(a)), 0) +pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" + )] + fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_n_u64(a: u64) -> u64 { - static_assert_uimm_bits!(N, 6); - simd_extract!(vqshl_n_u64::(vdup_n_u64(a)), 0) +pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" + )] + fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8); + } + let mut b: int8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { - static_assert_uimm_bits!(N, 4); - simd_extract!(vqshl_n_u16::(vdup_n_u16(a)), 0) +pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" + )] + fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_n_u32(a: u32) -> u32 { - static_assert_uimm_bits!(N, 5); - simd_extract!(vqshl_n_u32::(vdup_n_u32(a)), 0) +pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" + )] + fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8); + } + let mut b: int64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 { - let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b)); - simd_extract!(c, 0) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + vst2q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 { - let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b)); - simd_extract!(c, 0) +pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: poly64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst2q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 { - let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b)); - simd_extract!(c, 0) +pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + vst2q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 { - let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b)); - simd_extract!(c, 0) +pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + let mut b: uint8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst2q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 { - let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b)); - simd_extract!(c, 0) +pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + vst2q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { - let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b)); - simd_extract!(c, 0) +pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst2q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.i64" - )] - fn _vqshld_s64(a: i64, b: i64) -> i64; - } - _vqshld_s64(a, b) +pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + vst2q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.i64" - )] - fn _vqshld_u64(a: i64, b: i64) -> i64; - } - _vqshld_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + let mut b: poly8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst2q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(vqshlu_n_s8::(vdup_n_s8(a)), 0) +pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { + vst2q_s64(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { - static_assert_uimm_bits!(N, 6); - simd_extract!(vqshlu_n_s64::(vdup_n_s64(a)), 0) +pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { + let mut b: poly64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst2q_s64(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { - static_assert_uimm_bits!(N, 4); - simd_extract!(vqshlu_n_s16::(vdup_n_s16(a)), 0) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { + vst2q_s64(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { - static_assert_uimm_bits!(N, 5); - simd_extract!(vqshlu_n_s32::(vdup_n_s32(a)), 0) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { + let mut b: uint64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst2q_s64(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v1f64.p0i8" + )] + fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8); + } + _vst3_f64(b.0, b.1, b.2, a as _) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8" + )] + fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8); + } + _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]) +pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { + static_assert!(LANE == 0); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8" + )] + fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8); + } + _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { + static_assert!(LANE == 0); + vst3_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { + static_assert!(LANE == 0); + vst3_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" + )] + fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8); + } + _vst3q_f64(b.0, b.1, b.2, a as _) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.i32" + link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" )] - fn _vqshrnd_n_s64(a: i64, n: i32) -> i32; + fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8); } - _vqshrnd_n_s64(a, N) + let mut b: float64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3q_f64(b.0, b.1, b.2, a as _) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.i32" + link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" )] - fn _vqshrnd_n_u64(a: i64, n: i32) -> i32; + fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8); } - _vqshrnd_n_u64(a.as_signed(), N).as_unsigned() + _vst3q_s64(b.0, b.1, b.2, a as _) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { - static_assert!(N >= 1 && N <= 8); - simd_extract!(vqshrn_n_s16::(vdupq_n_s16(a)), 0) +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" + )] + fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8); + } + let mut b: int64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3q_s64(b.0, b.1, b.2, a as _) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { - static_assert!(N >= 1 && N <= 16); - simd_extract!(vqshrn_n_s32::(vdupq_n_s32(a)), 0) +pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" + )] + fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { - static_assert!(N >= 1 && N <= 8); - simd_extract!(vqshrn_n_u16::(vdupq_n_u16(a)), 0) +pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" + )] + fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8); + } + let mut b: float64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { - static_assert!(N >= 1 && N <= 16); - simd_extract!(vqshrn_n_u32::(vdupq_n_u32(a)), 0) +pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" + )] + fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqshrun_n_s16::(b), +pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" + )] + fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8); + } + let mut b: int8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" + )] + fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]) +pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" + )] + fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8); + } + let mut b: int64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_endian = "little")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { - static_assert!(N >= 1 && N <= 32); - simd_extract!(vqshrun_n_s64::(vdupq_n_s64(a)), 0) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + vst3q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: poly64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst3q_lane_s64::(transmute(a), transmute(b)) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { - static_assert!(N >= 1 && N <= 8); - simd_extract!(vqshrun_n_s16::(vdupq_n_s16(a)), 0) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + vst3q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { - static_assert!(N >= 1 && N <= 16); - simd_extract!(vqshrun_n_s32::(vdupq_n_s32(a)), 0) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + let mut b: uint8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst3q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 { - let a: int8x8_t = vdup_n_s8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqsub_s8(a, b), 0) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + vst3q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqsub_s16(a, b), 0) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst3q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 { - let a: uint8x8_t = vdup_n_u8(a); - let b: uint8x8_t = vdup_n_u8(b); - simd_extract!(vqsub_u8(a, b), 0) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + vst3q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { - let a: uint16x4_t = vdup_n_u16(a); - let b: uint16x4_t = vdup_n_u16(b); - simd_extract!(vqsub_u16(a, b), 0) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + let mut b: poly8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst3q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.i32" - )] - fn _vqsubs_s32(a: i32, b: i32) -> i32; - } - _vqsubs_s32(a, b) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { + vst3q_s64(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.i64" - )] - fn _vqsubd_s64(a: i64, b: i64) -> i64; - } - _vqsubd_s64(a, b) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { + let mut b: poly64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst3q_s64(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.i32" - )] - fn _vqsubs_u32(a: i32, b: i32) -> i32; - } - _vqsubs_u32(a.as_signed(), b.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { + vst3q_s64(transmute(a), transmute(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.i64" - )] - fn _vqsubd_u64(a: i64, b: i64) -> i64; - } - _vqsubd_u64(a.as_signed(), b.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { + let mut b: uint64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst3q_s64(transmute(a), transmute(b)) } -#[doc = "Rotate and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(rax1))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.rax1" + link_name = "llvm.aarch64.neon.st4.v1f64.p0i8" )] - fn _vrax1q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8); } - _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vst4_f64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { + static_assert!(LANE == 0); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rbit.v8i8" + link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8" )] - fn _vrbit_s8(a: int8x8_t) -> int8x8_t; + fn _vst4_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + d: float64x1_t, + n: i64, + ptr: *mut i8, + ); } - _vrbit_s8(a) + _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { + static_assert!(LANE == 0); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rbit.v16i8" + link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8" )] - fn _vrbitq_s8(a: int8x16_t) -> int8x16_t; - } - _vrbitq_s8(a) -} -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { - transmute(vrbit_s8(transmute(a))) -} -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { - transmute(vrbitq_s8(transmute(a))) + fn _vst4_lane_s64( + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + d: int64x1_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { - transmute(vrbit_s8(transmute(a))) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { + static_assert!(LANE == 0); + vst4_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { - transmute(vrbitq_s8(transmute(a))) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { + static_assert!(LANE == 0); + vst4_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v1f64" + link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" )] - fn _vrecpe_f64(a: float64x1_t) -> float64x1_t; + fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8); } - _vrecpe_f64(a) + _vst4q_f64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f64" + link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" )] - fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t; + fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8); } - _vrecpeq_f64(a) + let mut b: float64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4q_f64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecped_f64(a: f64) -> f64 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.f64" + link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" )] - fn _vrecped_f64(a: f64) -> f64; + fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8); } - _vrecped_f64(a) + _vst4q_s64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpes_f32(a: f32) -> f32 { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.f32" + link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" )] - fn _vrecpes_f32(a: f32) -> f32; + fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8); } - _vrecpes_f32(a) + let mut b: int64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4q_s64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v1f64" + link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" )] - fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + fn _vst4q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + n: i64, + ptr: *mut i8, + ); } - _vrecps_f64(a, b) + _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v2f64" + link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" )] - fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vst4q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + n: i64, + ptr: *mut i8, + ); } - _vrecpsq_f64(a, b) + let mut b: float64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.f64" + link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" )] - fn _vrecpsd_f64(a: f64, b: f64) -> f64; + fn _vst4q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + n: i64, + ptr: *mut i8, + ); } - _vrecpsd_f64(a, b) + _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.f32" + link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" )] - fn _vrecpss_f32(a: f32, b: f32) -> f32; + fn _vst4q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + n: i64, + ptr: *mut i8, + ); } - _vrecpss_f32(a, b) + let mut b: int8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Floating-point reciprocal exponent"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpx))] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpxd_f64(a: f64) -> f64 { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpx.f64" + link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" )] - fn _vrecpxd_f64(a: f64) -> f64; + fn _vst4q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + d: int64x2_t, + n: i64, + ptr: *mut i8, + ); } - _vrecpxd_f64(a) + _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Floating-point reciprocal exponent"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpx))] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpxs_f32(a: f32) -> f32 { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpx.f32" + link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" )] - fn _vrecpxs_f32(a: f32) -> f32; + fn _vst4q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + d: int64x2_t, + n: i64, + ptr: *mut i8, + ); } - _vrecpxs_f32(a) + let mut b: int64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + vst4q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { - transmute(a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: poly64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst4q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + vst4q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + let mut b: uint8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst4q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + vst4q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst4q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + vst4q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + let mut b: poly8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst4q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { - transmute(a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { + vst4q_s64(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { - transmute(a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { + let mut b: poly64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst4q_s64(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { + vst4q_s64(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { - transmute(a) +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { + let mut b: uint64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst4q_s64(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { - transmute(a) +#[cfg_attr(test, assert_instr(fsub))] +pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + simd_sub(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(fsub))] +pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_sub(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(fsub))] +pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { + a.wrapping_sub(b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 { + a.wrapping_sub(b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: int16x8_t = simd_cast(c); + let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: int16x8_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { - transmute(a) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: int16x8_t = simd_cast(c); + let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: int16x8_t = simd_cast(e); + let ret_val: int16x8_t = simd_sub(d, f); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: int32x4_t = simd_cast(c); + let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let f: int32x4_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: int32x4_t = simd_cast(c); + let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let f: int32x4_t = simd_cast(e); + let ret_val: int32x4_t = simd_sub(d, f); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: int64x2_t = simd_cast(c); + let e: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let f: int64x2_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: int64x2_t = simd_cast(c); + let e: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let f: int64x2_t = simd_cast(e); + let ret_val: int64x2_t = simd_sub(d, f); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint16x8_t = simd_cast(c); + let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: uint16x8_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint16x8_t = simd_cast(c); + let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: uint16x8_t = simd_cast(e); + let ret_val: uint16x8_t = simd_sub(d, f); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: uint32x4_t = simd_cast(c); + let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let f: uint32x4_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: uint32x4_t = simd_cast(c); + let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let f: uint32x4_t = simd_cast(e); + let ret_val: uint32x4_t = simd_sub(d, f); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: uint64x2_t = simd_cast(c); + let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let f: uint64x2_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: uint64x2_t = simd_cast(c); + let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let f: uint64x2_t = simd_cast(e); + let ret_val: uint64x2_t = simd_sub(d, f); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + simd_sub(a, simd_cast(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = simd_sub(a, simd_cast(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + simd_sub(a, simd_cast(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let ret_val: int32x4_t = simd_sub(a, simd_cast(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); + simd_sub(a, simd_cast(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let ret_val: int64x2_t = simd_sub(a, simd_cast(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + simd_sub(a, simd_cast(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = simd_sub(a, simd_cast(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + simd_sub(a, simd_cast(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let ret_val: uint32x4_t = simd_sub(a, simd_cast(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + simd_sub(a, simd_cast(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let ret_val: uint64x2_t = simd_sub(a, simd_cast(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(sudot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vsudot_laneq_s32( + a: int32x2_t, + b: int8x8_t, + c: uint8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, transmute(c), b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(sudot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vsudot_laneq_s32( + a: int32x2_t, + b: int8x8_t, + c: uint8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint32x4_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vusdot_s32(a, transmute(c), b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(sudot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vsudotq_laneq_s32( + a: int32x4_t, + b: int8x16_t, + c: uint8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, transmute(c), b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(sudot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vsudotq_laneq_s32( + a: int32x4_t, + b: int8x16_t, + c: uint8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint32x4_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vusdotq_s32(a, transmute(c), b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { - transmute(a) +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + vqtbl1_s8(vcombine_s8(a, crate::mem::zeroed()), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { - transmute(a) +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbl1_s8(vcombine_s8(a, crate::mem::zeroed()), transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { - transmute(a) +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + vqtbl1_u8(vcombine_u8(a, crate::mem::zeroed()), b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = vqtbl1_u8(vcombine_u8(a, crate::mem::zeroed()), b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { - transmute(a) +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + vqtbl1_p8(vcombine_p8(a, crate::mem::zeroed()), b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = vqtbl1_p8(vcombine_p8(a, crate::mem::zeroed()), b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { - transmute(a) +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { - transmute(a) +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + let mut a: int8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); + a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { - transmute(a) +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { - transmute(a) +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { - transmute(a) +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { - transmute(a) +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { - transmute(a) +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + let x = int8x16x2_t( + vcombine_s8(a.0, a.1), + vcombine_s8(a.2, crate::mem::zeroed()), + ); + transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + let mut a: int8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); + a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); + a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = int8x16x2_t( + vcombine_s8(a.0, a.1), + vcombine_s8(a.2, crate::mem::zeroed()), + ); + let ret_val: int8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { - transmute(a) +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t( + vcombine_u8(a.0, a.1), + vcombine_u8(a.2, crate::mem::zeroed()), + ); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { - transmute(a) +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = uint8x16x2_t( + vcombine_u8(a.0, a.1), + vcombine_u8(a.2, crate::mem::zeroed()), + ); + let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { - transmute(a) +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + let x = poly8x16x2_t( + vcombine_p8(a.0, a.1), + vcombine_p8(a.2, crate::mem::zeroed()), + ); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = poly8x16x2_t( + vcombine_p8(a.0, a.1), + vcombine_p8(a.2, crate::mem::zeroed()), + ); + let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v2f32" - )] - fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd32x_f32(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)); + transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v4f32" - )] - fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t; - } - _vrnd32xq_f32(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + let mut a: int8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); + a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); + a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); + a.3 = simd_shuffle!(a.3, a.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)); + let ret_val: int8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v2f64" - )] - fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t; - } - _vrnd32xq_f64(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint32x.f64" - )] - fn _vrnd32x_f64(a: f64) -> f64; - } - transmute(_vrnd32x_f64(simd_extract!(a, 0))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); + let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v2f32" - )] - fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd32z_f32(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v4f32" - )] - fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t; - } - _vrnd32zq_f32(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); + let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v2f64" - )] - fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t; - } - _vrnd32zq_f64(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + simd_select( + simd_lt::(c, transmute(i8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_s8(b, crate::mem::zeroed())), + transmute(c), + )), + a, + ) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint32z.f64" - )] - fn _vrnd32z_f64(a: f64) -> f64; - } - transmute(_vrnd32z_f64(simd_extract!(a, 0))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_select( + simd_lt::(c, transmute(i8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_s8(b, crate::mem::zeroed())), + transmute(c), + )), + a, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v2f32" - )] - fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd64x_f32(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + simd_select( + simd_lt::(c, transmute(u8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_u8(b, crate::mem::zeroed())), + c, + )), + a, + ) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v4f32" - )] - fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t; - } - _vrnd64xq_f32(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_select( + simd_lt::(c, transmute(u8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_u8(b, crate::mem::zeroed())), + c, + )), + a, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v2f64" - )] - fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t; - } - _vrnd64xq_f64(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + simd_select( + simd_lt::(c, transmute(u8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_p8(b, crate::mem::zeroed())), + c, + )), + a, + ) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint64x.f64" - )] - fn _vrnd64x_f64(a: f64) -> f64; - } - transmute(_vrnd64x_f64(simd_extract!(a, 0))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_select( + simd_lt::(c, transmute(u8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_p8(b, crate::mem::zeroed())), + c, + )), + a, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v2f32" - )] - fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd64z_f32(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v4f32" - )] - fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t; - } - _vrnd64zq_f32(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x2_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v2f64" - )] - fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t; - } - _vrnd64zq_f64(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint64z.f64" - )] - fn _vrnd64z_f64(a: f64) -> f64; - } - transmute(_vrnd64z_f64(simd_extract!(a, 0))) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x2_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v2f32" - )] - fn _vrnd_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd_f32(a) +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x2_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v4f32" - )] - fn _vrndq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndq_f32(a) +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + let x = int8x16x2_t( + vcombine_s8(b.0, b.1), + vcombine_s8(b.2, crate::mem::zeroed()), + ); + transmute(simd_select( + simd_lt::(transmute(c), transmute(i8x8::splat(24))), + transmute(vqtbx2( + transmute(a), + transmute(x.0), + transmute(x.1), + transmute(c), + )), + a, + )) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v1f64" - )] - fn _vrnd_f64(a: float64x1_t) -> float64x1_t; - } - _vrnd_f64(a) +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x3_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = int8x16x2_t( + vcombine_s8(b.0, b.1), + vcombine_s8(b.2, crate::mem::zeroed()), + ); + let ret_val: int8x8_t = transmute(simd_select( + simd_lt::(transmute(c), transmute(i8x8::splat(24))), + transmute(vqtbx2( + transmute(a), + transmute(x.0), + transmute(x.1), + transmute(c), + )), + a, + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v2f64" - )] - fn _vrndq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndq_f64(a) +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t( + vcombine_u8(b.0, b.1), + vcombine_u8(b.2, crate::mem::zeroed()), + ); + transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v2f32" - )] - fn _vrnda_f32(a: float32x2_t) -> float32x2_t; - } - _vrnda_f32(a) +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x3_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = uint8x16x2_t( + vcombine_u8(b.0, b.1), + vcombine_u8(b.2, crate::mem::zeroed()), + ); + let ret_val: uint8x8_t = transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v4f32" - )] - fn _vrndaq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndaq_f32(a) +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + let x = poly8x16x2_t( + vcombine_p8(b.0, b.1), + vcombine_p8(b.2, crate::mem::zeroed()), + ); + transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v1f64" - )] - fn _vrnda_f64(a: float64x1_t) -> float64x1_t; - } - _vrnda_f64(a) +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x3_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = poly8x16x2_t( + vcombine_p8(b.0, b.1), + vcombine_p8(b.2, crate::mem::zeroed()), + ); + let ret_val: poly8x8_t = transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v2f64" - )] - fn _vrndaq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndaq_f64(a) +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + vqtbx2( + transmute(a), + transmute(vcombine_s8(b.0, b.1)), + transmute(vcombine_s8(b.2, b.3)), + transmute(c), + ) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v2f32" - )] - fn _vrndi_f32(a: float32x2_t) -> float32x2_t; - } - _vrndi_f32(a) +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x4_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqtbx2( + transmute(a), + transmute(vcombine_s8(b.0, b.1)), + transmute(vcombine_s8(b.2, b.3)), + transmute(c), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v4f32" - )] - fn _vrndiq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndiq_f32(a) +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx2( + transmute(a), + transmute(vcombine_u8(b.0, b.1)), + transmute(vcombine_u8(b.2, b.3)), + c, + )) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v1f64" - )] - fn _vrndi_f64(a: float64x1_t) -> float64x1_t; - } - _vrndi_f64(a) +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x4_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx2( + transmute(a), + transmute(vcombine_u8(b.0, b.1)), + transmute(vcombine_u8(b.2, b.3)), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v2f64" - )] - fn _vrndiq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndiq_f64(a) +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx2( + transmute(a), + transmute(vcombine_p8(b.0, b.1)), + transmute(vcombine_p8(b.2, b.3)), + c, + )) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v2f32" - )] - fn _vrndm_f32(a: float32x2_t) -> float32x2_t; - } - _vrndm_f32(a) +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x4_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx2( + transmute(a), + transmute(vcombine_p8(b.0, b.1)), + transmute(vcombine_p8(b.2, b.3)), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v4f32" - )] - fn _vrndmq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndmq_f32(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v1f64" - )] - fn _vrndm_f64(a: float64x1_t) -> float64x1_t; - } - _vrndm_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v2f64" - )] - fn _vrndmq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndmq_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintn))] -pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v1f64" - )] - fn _vrndn_f64(a: float64x1_t) -> float64x1_t; - } - _vrndn_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintn))] -pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v2f64" - )] - fn _vrndnq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndnq_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintn))] -pub unsafe fn vrndns_f32(a: f32) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.roundeven.f32" - )] - fn _vrndns_f32(a: f32) -> f32; - } - _vrndns_f32(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v2f32" - )] - fn _vrndp_f32(a: float32x2_t) -> float32x2_t; - } - _vrndp_f32(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v4f32" - )] - fn _vrndpq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndpq_f32(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v1f64" - )] - fn _vrndp_f64(a: float64x1_t) -> float64x1_t; - } - _vrndp_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v2f64" - )] - fn _vrndpq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndpq_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v2f32" - )] - fn _vrndx_f32(a: float32x2_t) -> float32x2_t; - } - _vrndx_f32(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v4f32" - )] - fn _vrndxq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndxq_f32(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v1f64" - )] - fn _vrndx_f64(a: float64x1_t) -> float64x1_t; - } - _vrndx_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v2f64" - )] - fn _vrndxq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndxq_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.i64" - )] - fn _vrshld_s64(a: i64, b: i64) -> i64; - } - _vrshld_s64(a, b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.i64" - )] - fn _vrshld_u64(a: i64, b: i64) -> i64; - } - _vrshld_u64(a.as_signed(), b).as_unsigned() +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshr, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - vrshld_s64(a, -N as i64) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshr, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - vrshld_u64(a, -N as i64) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shuffle!( a, - vrshrn_n_s16::(b), + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + simd_shuffle!( + ret_val, + ret_val, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] +} + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vrshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v1f64" - )] - fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t; - } - _vrsqrte_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f64" - )] - fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; - } - _vrsqrteq_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrted_f64(a: f64) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.f64" - )] - fn _vrsqrted_f64(a: f64) -> f64; - } - _vrsqrted_f64(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.f32" - )] - fn _vrsqrtes_f32(a: f32) -> f32; - } - _vrsqrtes_f32(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v1f64" - )] - fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - _vrsqrts_f64(a, b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v2f64" - )] - fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vrsqrtsq_f64(a, b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.f64" - )] - fn _vrsqrtsd_f64(a: f64, b: f64) -> f64; - } - _vrsqrtsd_f64(a, b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.f32" - )] - fn _vrsqrtss_f32(a: f32, b: f32) -> f32; - } - _vrsqrtss_f32(a, b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Signed rounding shift right and accumulate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshr, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - let b: i64 = vrshrd_n_s64::(b); - a.wrapping_add(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unsigned rounding shift right and accumulate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshr, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - let b: u64 = vrshrd_n_u64::(b); - a.wrapping_add(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let x: int8x8_t = vrsubhn_s16(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let x: int16x4_t = vrsubhn_s32(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let x: int32x2_t = vrsubhn_s64(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let x: uint8x8_t = vrsubhn_u16(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let x: uint16x4_t = vrsubhn_u32(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let x: uint32x2_t = vrsubhn_u64(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "SHA512 hash update part 2"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512h2))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512h2" - )] - fn _vsha512h2q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "SHA512 hash update part 1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512h))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512h" - )] - fn _vsha512hq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "SHA512 schedule update 0"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512su0))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512su0" - )] - fn _vsha512su0q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "SHA512 schedule update 1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512su1))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512su1" - )] - fn _vsha512su1q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { - transmute(vshl_s64(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { - transmute(vshl_u64(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { - static_assert!(N >= 0 && N <= 8); - let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vshll_n_s8::(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 16); - let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vshll_n_s16::(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 32); - let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); - vshll_n_s32::(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { - static_assert!(N >= 0 && N <= 8); - let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vshll_n_u8::(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 16); - let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vshll_n_u16::(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 32); - let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - vshll_n_u32::(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Shift left and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] -pub unsafe fn vslid_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Shift left and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] -pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_u64::(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ) } -#[doc = "SM3PARTW1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw1" - )] - fn _vsm3partw1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "SM3PARTW2"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw2))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw2" - )] - fn _vsm3partw2q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "SM3SS1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3ss1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3ss1" - )] - fn _vsm3ss1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "SM4 key"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm4ekey))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm4ekey" - )] - fn _vsm4ekeyq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "SM4 encode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm4e))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm4e" - )] - fn _vsm4eq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { - simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { - simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.i64" - )] - fn _vsqaddd_u64(a: i64, b: i64) -> i64; - } - _vsqaddd_u64(a.as_signed(), b).as_unsigned() +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.i32" - )] - fn _vsqadds_u32(a: i32, b: i32) -> i32; - } - _vsqadds_u32(a.as_signed(), b).as_unsigned() +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { - simd_fsqrt(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { - simd_fsqrt(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { - simd_fsqrt(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { - simd_fsqrt(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Shift right and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] -pub unsafe fn vsrid_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Shift right and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] -pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_u64::(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64" - )] - fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64); - } - _vst1_f64_x2(b.0, b.1, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" - )] - fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64); - } - _vst1q_f64_x2(b.0, b.1, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64" - )] - fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64); - } - _vst1_f64_x3(b.0, b.1, b.2, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" - )] - fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64); - } - _vst1q_f64_x3(b.0, b.1, b.2, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64" - )] - fn _vst1_f64_x4( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - d: float64x1_t, - ptr: *mut f64, - ); - } - _vst1_f64_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ) +} + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" - )] - fn _vst1q_f64_x4( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - ptr: *mut f64, - ); - } - _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] + +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v1f64.p0i8" - )] - fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8); - } - _vst2_f64(b.0, b.1, a as _) +pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + let c: int64x1_t = simd_and(a, b); + let d: i64x1 = i64x1::new(0); + simd_ne(c, transmute(d)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8" - )] - fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8); - } - _vst2_lane_f64(b.0, b.1, LANE as i64, a as _) +pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let c: int64x2_t = simd_and(a, b); + let d: i64x2 = i64x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8" - )] - fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8); - } - _vst2_lane_s64(b.0, b.1, LANE as i64, a as _) +pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_and(a, b); + let d: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { - static_assert!(LANE == 0); - vst2_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { + let c: poly64x1_t = simd_and(a, b); + let d: i64x1 = i64x1::new(0); + simd_ne(c, transmute(d)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { - static_assert!(LANE == 0); - vst2_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { + let c: poly64x2_t = simd_and(a, b); + let d: i64x2 = i64x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" - )] - fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8); - } - _vst2q_f64(b.0, b.1, a as _) +pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: poly64x2_t = simd_and(a, b); + let d: i64x2 = i64x2::new(0, 0); + let ret_val: uint64x2_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" - )] - fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8); - } - _vst2q_s64(b.0, b.1, a as _) +pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + let c: uint64x1_t = simd_and(a, b); + let d: u64x1 = u64x1::new(0); + simd_ne(c, transmute(d)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" - )] - fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) +pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let c: uint64x2_t = simd_and(a, b); + let d: u64x2 = u64x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" - )] - fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) +pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_and(a, b); + let d: u64x2 = u64x2::new(0, 0); + let ret_val: uint64x2_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] + +#[doc = "Compare bitwise test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" - )] - fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) +pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 { + transmute(vtst_s64(transmute(a), transmute(b))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] + +#[doc = "Compare bitwise test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - vst2q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 { + transmute(vtst_u64(transmute(a), transmute(b))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); - vst2q_lane_s8::(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v8i8" + )] + fn _vuqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vuqadd_s8(a, b.as_signed()) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - vst2q_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v8i8" + )] + fn _vuqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vuqadd_s8(a, b.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); - vst2q_lane_s8::(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v16i8" + )] + fn _vuqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vuqaddq_s8(a, b.as_signed()) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { - vst2q_s64(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v16i8" + )] + fn _vuqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vuqaddq_s8(a, b.as_signed()); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { - vst2q_s64(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v4i16" + )] + fn _vuqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vuqadd_s16(a, b.as_signed()) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v1f64.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v4i16" )] - fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8); + fn _vuqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vst3_f64(b.0, b.1, b.2, a as _) + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vuqadd_s16(a, b.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v8i16" )] - fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8); + fn _vuqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) + _vuqaddq_s16(a, b.as_signed()) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v8i16" )] - fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8); + fn _vuqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vuqaddq_s16(a, b.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { - static_assert!(LANE == 0); - vst3_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v2i32" + )] + fn _vuqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vuqadd_s32(a, b.as_signed()) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { - static_assert!(LANE == 0); - vst3_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v2i32" + )] + fn _vuqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vuqadd_s32(a, b.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v4i32" )] - fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8); + fn _vuqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vst3q_f64(b.0, b.1, b.2, a as _) + _vuqaddq_s32(a, b.as_signed()) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v4i32" )] - fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8); + fn _vuqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vst3q_s64(b.0, b.1, b.2, a as _) + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vuqaddq_s32(a, b.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v1i64" )] - fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8); + fn _vuqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; } - _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) + _vuqadd_s64(a, b.as_signed()) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v2i64" )] - fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8); + fn _vuqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) + _vuqaddq_s64(a, b.as_signed()) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] + +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" + link_name = "llvm.aarch64.neon.suqadd.v2i64" )] - fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8); + fn _vuqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vuqaddq_s64(a, b.as_signed()); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] + +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - vst3q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 { + simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] + +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); - vst3q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 { + simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) +} + +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.i64" + )] + fn _vuqaddd_s64(a: i64, b: i64) -> i64; + } + _vuqaddd_s64(a, b.as_signed()) +} + +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.i32" + )] + fn _vuqadds_s32(a: i32, b: i32) -> i32; + } + _vuqadds_s32(a, b.as_signed()) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - vst3q_lane_s64::(transmute(a), transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(usdot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vusdot_laneq_s32( + a: int32x2_t, + b: uint8x8_t, + c: int8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, b, transmute(c)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); - vst3q_lane_s8::(transmute(a), transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(usdot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vusdot_laneq_s32( + a: int32x2_t, + b: uint8x8_t, + c: int8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int32x4_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { - vst3q_s64(transmute(a), transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(usdot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vusdotq_laneq_s32( + a: int32x4_t, + b: uint8x16_t, + c: int8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, b, transmute(c)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { - vst3q_s64(transmute(a), transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(usdot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vusdotq_laneq_s32( + a: int32x4_t, + b: uint8x16_t, + c: int8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int32x4_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vusdotq_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v1f64.p0i8" - )] - fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8); - } - _vst4_f64(b.0, b.1, b.2, b.3, a as _) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8" - )] - fn _vst4_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - d: float64x1_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8" - )] - fn _vst4_lane_s64( - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - d: int64x1_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { - static_assert!(LANE == 0); - vst4_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { - static_assert!(LANE == 0); - vst4_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" - )] - fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8); - } - _vst4q_f64(b.0, b.1, b.2, b.3, a as _) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" - )] - fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8); - } - _vst4q_s64(b.0, b.1, b.2, b.3, a as _) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" - )] - fn _vst4q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" - )] - fn _vst4q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" - )] - fn _vst4q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - d: int64x2_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - vst4q_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); - vst4q_lane_s8::(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - vst4q_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); - vst4q_lane_s8::(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { - vst4q_s64(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { - vst4q_s64(transmute(a), transmute(b)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fsub))] -pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - simd_sub(a, b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fsub))] -pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_sub(a, b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { - a.wrapping_sub(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 { - a.wrapping_sub(b) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: int16x8_t = simd_cast(c); - let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: int16x8_t = simd_cast(e); - simd_sub(d, f) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: int32x4_t = simd_cast(c); - let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let f: int32x4_t = simd_cast(e); - simd_sub(d, f) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: int64x2_t = simd_cast(c); - let e: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let f: int64x2_t = simd_cast(e); - simd_sub(d, f) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint16x8_t = simd_cast(c); - let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: uint16x8_t = simd_cast(e); - simd_sub(d, f) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: uint32x4_t = simd_cast(c); - let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let f: uint32x4_t = simd_cast(e); - simd_sub(d, f) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: uint64x2_t = simd_cast(c); - let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let f: uint64x2_t = simd_cast(e); - simd_sub(d, f) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - simd_sub(a, simd_cast(c)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - simd_sub(a, simd_cast(c)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); - simd_sub(a, simd_cast(c)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - simd_sub(a, simd_cast(c)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - simd_sub(a, simd_cast(c)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - simd_sub(a, simd_cast(c)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(sudot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vsudot_laneq_s32( - a: int32x2_t, - b: int8x8_t, - c: uint8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, transmute(c), b) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(sudot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vsudotq_laneq_s32( - a: int32x4_t, - b: int8x16_t, - c: uint8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, transmute(c), b) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [1, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - let c: int64x1_t = simd_and(a, b); - let d: i64x1 = i64x1::new(0); - simd_ne(c, transmute(d)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let c: int64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - simd_ne(c, transmute(d)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { - let c: poly64x1_t = simd_and(a, b); - let d: i64x1 = i64x1::new(0); - simd_ne(c, transmute(d)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { - let c: poly64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - simd_ne(c, transmute(d)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - let c: uint64x1_t = simd_and(a, b); - let d: u64x1 = u64x1::new(0); - simd_ne(c, transmute(d)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let c: uint64x2_t = simd_and(a, b); - let d: u64x2 = u64x2::new(0, 0); - simd_ne(c, transmute(d)) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare bitwise test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 { - transmute(vtst_s64(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Compare bitwise test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 { - transmute(vtst_u64(transmute(a), transmute(b))) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 { - simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 { - simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] + +#[doc = "Exclusive OR and rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(IMM6, 6); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.i64" + link_name = "llvm.aarch64.crypto.xar" )] - fn _vuqaddd_s64(a: i64, b: i64) -> i64; + fn _vxarq_u64(a: int64x2_t, b: int64x2_t, n: i64) -> int64x2_t; } - _vuqaddd_s64(a, b.as_signed()) + _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned() } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] + +#[doc = "Exclusive OR and rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(IMM6, 6); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.i32" + link_name = "llvm.aarch64.crypto.xar" )] - fn _vuqadds_s32(a: i32, b: i32) -> i32; + fn _vxarq_u64(a: int64x2_t, b: int64x2_t, n: i64) -> int64x2_t; } - _vuqadds_s32(a, b.as_signed()) -} -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(usdot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vusdot_laneq_s32( - a: int32x2_t, - b: uint8x8_t, - c: int8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, b, transmute(c)) -} -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(usdot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vusdotq_laneq_s32( - a: int32x4_t, - b: uint8x16_t, - c: int8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, b, transmute(c)) + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [0, 2]) -} -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle!( a, b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) -} -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) -} -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) -} -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_shuffle!( a, b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { +pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [0, 2]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [1, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [2, 6, 3, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [1, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [2, 6, 3, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [1, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [2, 6, 3, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [1, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [2, 6, 3, 7]) +pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) +pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_shuffle!( a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [2, 6, 3, 7]) +pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) +pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [2, 6, 3, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [2, 6, 3, 7]) +pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) +pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [1, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ) +pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) +pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [1, 3]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [2, 6, 3, 7]) +pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } + #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } + #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -21029,36 +54812,114 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ) } + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [1, 3]) } + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 48c162de4b..7592d0e02f 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -1,9 +1,9 @@ // This code is automatically generated. DO NOT MODIFY. // -// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: // // ``` -// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec // ``` #![allow(improper_ctypes)] @@ -12,854 +12,760 @@ use stdarch_test::assert_instr; use super::*; -#[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] +#[doc = "CRC32 single round checksum for bytes (8 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32b)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] +#[target_feature(enable = "crc")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(crc32b))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabal) + target_arch = "arm", + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] -pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - let d: int8x8_t = vabd_s8(b, c); - let e: uint8x8_t = simd_cast(d); - simd_add(a, simd_cast(e)) +pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crc32b" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32b")] + fn ___crc32b(crc: i32, data: i32) -> i32; + } + ___crc32b(crc.as_signed(), data.as_signed() as i32).as_unsigned() } -#[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] + +#[doc = "CRC32-C single round checksum for bytes (8 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cb)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] +#[target_feature(enable = "crc")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(crc32cb))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabal) + target_arch = "arm", + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] -pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - let d: int16x4_t = vabd_s16(b, c); - let e: uint16x4_t = simd_cast(d); - simd_add(a, simd_cast(e)) +pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crc32cb" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cb")] + fn ___crc32cb(crc: i32, data: i32) -> i32; + } + ___crc32cb(crc.as_signed(), data.as_signed() as i32).as_unsigned() } -#[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] + +#[doc = "CRC32-C single round checksum for quad words (64 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[target_feature(enable = "crc")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(crc32cw))] #[cfg_attr( target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] -pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - let d: int32x2_t = vabd_s32(b, c); - let e: uint32x2_t = simd_cast(d); - simd_add(a, simd_cast(e)) +pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { + __crc32cw( + __crc32cw(crc, (data & 0xFFFFFFFF) as u32), + (data >> 32) as u32, + ) } -#[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] + +#[doc = "CRC32-C single round checksum for bytes (16 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32ch)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] +#[target_feature(enable = "crc")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(crc32ch))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabal) + target_arch = "arm", + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] -pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - let d: uint8x8_t = vabd_u8(b, c); - simd_add(a, simd_cast(d)) +pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crc32ch" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32ch")] + fn ___crc32ch(crc: i32, data: i32) -> i32; + } + ___crc32ch(crc.as_signed(), data.as_signed() as i32).as_unsigned() } -#[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] + +#[doc = "CRC32-C single round checksum for bytes (32 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cw)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] +#[target_feature(enable = "crc")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(crc32cw))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabal) + target_arch = "arm", + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] +pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crc32cw" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] + fn ___crc32cw(crc: i32, data: i32) -> i32; + } + ___crc32cw(crc.as_signed(), data.as_signed()).as_unsigned() +} + +#[doc = "CRC32 single round checksum for quad words (64 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "crc")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(crc32w))] #[cfg_attr( target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] -pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - let d: uint16x4_t = vabd_u16(b, c); - simd_add(a, simd_cast(d)) +pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { + __crc32w( + __crc32w(crc, (data & 0xFFFFFFFF) as u32), + (data >> 32) as u32, + ) } -#[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] + +#[doc = "CRC32 single round checksum for bytes (16 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32h)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] +#[target_feature(enable = "crc")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(crc32h))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabal) + target_arch = "arm", + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] -pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - let d: uint32x2_t = vabd_u32(b, c); - simd_add(a, simd_cast(d)) +pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crc32h" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32h")] + fn ___crc32h(crc: i32, data: i32) -> i32; + } + ___crc32h(crc.as_signed(), data.as_signed() as i32).as_unsigned() } -#[doc = "Absolute difference between the arguments of Floating"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] + +#[doc = "CRC32 single round checksum for bytes (32 bits)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32w)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] +#[target_feature(enable = "crc")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(crc32w))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabd) + target_arch = "arm", + unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] -pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] +pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fabd.v2f32" + link_name = "llvm.aarch64.crc32w" )] - fn _vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] + fn ___crc32w(crc: i32, data: i32) -> i32; } - _vabd_f32(a, b) + ___crc32w(crc.as_signed(), data.as_signed()).as_unsigned() } -#[doc = "Absolute difference between the arguments of Floating"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fabd.v4f32" - )] - fn _vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; +unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i16.v8i8")] + fn _priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t; } - _vabdq_f32(a, b) + _priv_vpadal_s8(a, b) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")] - fn _vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i16.v8i8")] + fn _priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t; } - _vabd_s8(a, b) + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = _priv_vpadal_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")] - fn _vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; +unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v8i16.v16i8")] + fn _priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; } - _vabdq_s8(a, b) + _priv_vpadalq_s8(a, b) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")] - fn _vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v8i16.v16i8")] + fn _priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; } - _vabd_s16(a, b) + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = _priv_vpadalq_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")] - fn _vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i32.v4i16")] + fn _priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t; } - _vabdq_s16(a, b) + _priv_vpadal_s16(a, b) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")] - fn _vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i32.v4i16")] + fn _priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t; } - _vabd_s32(a, b) + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x2_t = _priv_vpadal_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")] - fn _vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i32.v8i16")] + fn _priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; } - _vabdq_s32(a, b) + _priv_vpadalq_s16(a, b) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] - fn _vabd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i32.v8i16")] + fn _priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; } - _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned() + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = _priv_vpadalq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] - fn _vabdq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; +unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v1i64.v2i32")] + fn _priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } - _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadal_s32(a, b) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] - fn _vabd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v1i64.v2i32")] + fn _priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } - _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned() + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + _priv_vpadal_s32(a, b) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")] - fn _vabdq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i64.v4i32")] + fn _priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; } - _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadalq_s32(a, b) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")] - fn _vabd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i64.v4i32")] + fn _priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; } - _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned() + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int64x2_t = _priv_vpadalq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] - fn _vabdq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i16.v8i8")] + fn _priv_vpadal_u8(a: int16x4_t, b: int8x8_t) -> int16x4_t; } - _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadal_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - let c: uint8x8_t = simd_cast(vabd_s8(a, b)); - simd_cast(c) +unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i16.v8i8")] + fn _priv_vpadal_u8(a: int16x4_t, b: int8x8_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x4_t = _priv_vpadal_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - let c: uint16x4_t = simd_cast(vabd_s16(a, b)); - simd_cast(c) +unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v8i16.v16i8")] + fn _priv_vpadalq_u8(a: int16x8_t, b: int8x16_t) -> int16x8_t; + } + _priv_vpadalq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - let c: uint32x2_t = simd_cast(vabd_s32(a, b)); - simd_cast(c) +unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v8i16.v16i8")] + fn _priv_vpadalq_u8(a: int16x8_t, b: int8x16_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = _priv_vpadalq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - simd_cast(vabd_u8(a, b)) +unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i32.v4i16")] + fn _priv_vpadal_u16(a: int32x2_t, b: int16x4_t) -> int32x2_t; + } + _priv_vpadal_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - simd_cast(vabd_u16(a, b)) +unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i32.v4i16")] + fn _priv_vpadal_u16(a: int32x2_t, b: int16x4_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x2_t = _priv_vpadal_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - simd_cast(vabd_u32(a, b)) +unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i32.v8i16")] + fn _priv_vpadalq_u16(a: int32x4_t, b: int16x8_t) -> int32x4_t; + } + _priv_vpadalq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Floating-point absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { - simd_fabs(a) +unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i32.v8i16")] + fn _priv_vpadalq_u16(a: int32x4_t, b: int16x8_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = _priv_vpadalq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { - simd_fabs(a) +unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v1i64.v2i32")] + fn _priv_vpadal_u32(a: int64x1_t, b: int32x2_t) -> int64x1_t; + } + _priv_vpadal_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_xor(a, b) +unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v1i64.v2i32")] + fn _priv_vpadal_u32(a: int64x1_t, b: int32x2_t) -> int64x1_t; + } + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + _priv_vpadal_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_xor(a, b) +unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i64.v4i32")] + fn _priv_vpadalq_u32(a: int64x2_t, b: int32x4_t) -> int64x2_t; + } + _priv_vpadalq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_xor(a, b) +unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i64.v4i32")] + fn _priv_vpadalq_u32(a: int64x2_t, b: int32x4_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint64x2_t = _priv_vpadalq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -869,20 +775,24 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_xor(a, b) +pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + let d: int8x8_t = vabd_s8(b, c); + let e: uint8x8_t = simd_cast(d); + simd_add(a, simd_cast(e)) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)"] + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -892,20 +802,28 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - simd_xor(a, b) +pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int8x8_t = vabd_s8(b, c); + let e: uint8x8_t = simd_cast(d); + let ret_val: int16x8_t = simd_add(a, simd_cast(e)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -915,20 +833,24 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_xor(a, b) +pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + let d: int16x4_t = vabd_s16(b, c); + let e: uint16x4_t = simd_cast(d); + simd_add(a, simd_cast(e)) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -938,20 +860,28 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { - a ^ b +pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let d: int16x4_t = vabd_s16(b, c); + let e: uint16x4_t = simd_cast(d); + let ret_val: int32x4_t = simd_add(a, simd_cast(e)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(sabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -961,20 +891,24 @@ pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_and(a, b) +pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + let d: int32x2_t = vabd_s32(b, c); + let e: uint32x2_t = simd_cast(d); + simd_add(a, simd_cast(e)) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] + +#[doc = "Signed Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(sabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -984,20 +918,28 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_and(a, b) +pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let d: int32x2_t = vabd_s32(b, c); + let e: uint32x2_t = simd_cast(d); + let ret_val: int64x2_t = simd_add(a, simd_cast(e)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -1007,20 +949,23 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_and(a, b) +pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + let d: uint8x8_t = vabd_u8(b, c); + simd_add(a, simd_cast(d)) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -1030,20 +975,27 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_and(a, b) +pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: uint8x8_t = vabd_u8(b, c); + let ret_val: uint16x8_t = simd_add(a, simd_cast(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -1053,20 +1005,23 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_and(a, b) +pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + let d: uint16x4_t = vabd_u16(b, c); + simd_add(a, simd_cast(d)) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -1076,20 +1031,27 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_and(a, b) +pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let d: uint16x4_t = vabd_u16(b, c); + let ret_val: uint32x4_t = simd_add(a, simd_cast(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -1099,20 +1061,23 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_and(a, b) +pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + let d: uint32x2_t = vabd_u32(b, c); + simd_add(a, simd_cast(d)) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] + +#[doc = "Unsigned Absolute difference and Accumulate Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(uabal) )] #[cfg_attr( not(target_arch = "arm"), @@ -1122,20 +1087,27 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_and(a, b) +pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let d: uint32x2_t = vabd_u32(b, c); + let ret_val: uint64x2_t = simd_add(a, simd_cast(d)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] + +#[doc = "Absolute difference between the arguments of Floating"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1145,20 +1117,30 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_and(a, b) +pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fabd.v2f32" + )] + fn _vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vabd_f32(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] + +#[doc = "Absolute difference between the arguments of Floating"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1168,20 +1150,33 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_and(a, b) +pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fabd.v2f32" + )] + fn _vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vabd_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] + +#[doc = "Absolute difference between the arguments of Floating"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1191,20 +1186,30 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_and(a, b) +pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fabd.v4f32" + )] + fn _vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vabdq_f32(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] + +#[doc = "Absolute difference between the arguments of Floating"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1214,20 +1219,33 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_and(a, b) +pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fabd.v4f32" + )] + fn _vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vabdq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1237,20 +1255,30 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_and(a, b) +pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")] + fn _vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vabd_s8(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1260,20 +1288,33 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_and(a, b) +pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")] + fn _vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vabd_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1283,20 +1324,30 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_and(a, b) +pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")] + fn _vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vabdq_s8(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1306,20 +1357,37 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_and(a, b) +pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")] + fn _vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vabdq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1329,28 +1397,30 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] +pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" + link_name = "llvm.aarch64.neon.sabd.v4i16" )] - fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")] + fn _vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vcage_f32(a, b).as_unsigned() + _vabd_s16(a, b) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1360,28 +1430,33 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] +pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" + link_name = "llvm.aarch64.neon.sabd.v4i16" )] - fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")] + fn _vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vcageq_f32(a, b).as_unsigned() + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vabd_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1391,28 +1466,30 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] +pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" + link_name = "llvm.aarch64.neon.sabd.v8i16" )] - fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")] + fn _vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vcagt_f32(a, b).as_unsigned() + _vabdq_s16(a, b) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1422,28 +1499,33 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] +pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" + link_name = "llvm.aarch64.neon.sabd.v8i16" )] - fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")] + fn _vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vcagtq_f32(a, b).as_unsigned() + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vabdq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1453,20 +1535,30 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - vcage_f32(b, a) +pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")] + fn _vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vabd_s32(a, b) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1476,20 +1568,33 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - vcageq_f32(b, a) +pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")] + fn _vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vabd_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1499,20 +1604,30 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - vcagt_f32(b, a) +pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")] + fn _vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vabdq_s32(a, b) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1522,20 +1637,33 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - vcagtq_f32(b, a) +pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sabd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")] + fn _vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vabdq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1545,20 +1673,30 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_eq(a, b) +pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] + fn _vabd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1568,20 +1706,33 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_eq(a, b) +pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] + fn _vabd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1591,20 +1742,30 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_eq(a, b) +pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] + fn _vabdq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1614,20 +1775,37 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_eq(a, b) +pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] + fn _vabdq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1637,20 +1815,30 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_eq(a, b) +pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] + fn _vabd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1660,20 +1848,33 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_eq(a, b) +pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] + fn _vabd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1683,20 +1884,30 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_eq(a, b) +pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")] + fn _vabdq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1706,20 +1917,33 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_eq(a, b) +pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")] + fn _vabdq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1729,20 +1953,30 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_eq(a, b) +pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")] + fn _vabd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1752,20 +1986,33 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_eq(a, b) +pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")] + fn _vabd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1775,20 +2022,30 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_eq(a, b) +pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] + fn _vabdq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] + +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1798,20 +2055,33 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_eq(a, b) +pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uabd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] + fn _vabdq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1821,20 +2091,23 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_eq(a, b) +pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + let c: uint8x8_t = simd_cast(vabd_s8(a, b)); + simd_cast(c) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1844,20 +2117,26 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_eq(a, b) +pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_cast(vabd_s8(a, b)); + let ret_val: int16x8_t = simd_cast(c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1867,20 +2146,23 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { - simd_eq(a, b) +pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + let c: uint16x4_t = simd_cast(vabd_s16(a, b)); + simd_cast(c) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1890,20 +2172,26 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { - simd_eq(a, b) +pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_cast(vabd_s16(a, b)); + let ret_val: int32x4_t = simd_cast(c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1913,20 +2201,23 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_ge(a, b) +pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + let c: uint32x2_t = simd_cast(vabd_s32(a, b)); + simd_cast(c) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] + +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1936,20 +2227,26 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_ge(a, b) +pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_cast(vabd_s32(a, b)); + let ret_val: int64x2_t = simd_cast(c); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1959,20 +2256,22 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_ge(a, b) +pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + simd_cast(vabd_u8(a, b)) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1982,20 +2281,25 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_ge(a, b) +pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_cast(vabd_u8(a, b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2005,20 +2309,22 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_ge(a, b) +pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + simd_cast(vabd_u16(a, b)) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2028,20 +2334,25 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_ge(a, b) +pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_cast(vabd_u16(a, b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2051,20 +2362,22 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_ge(a, b) +pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + simd_cast(vabd_u32(a, b)) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] + +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -2074,20 +2387,25 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_ge(a, b) +pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_cast(vabd_u32(a, b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] + +#[doc = "Floating-point absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(fabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2097,20 +2415,22 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_ge(a, b) +pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { + simd_fabs(a) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] + +#[doc = "Floating-point absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(fabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2120,20 +2440,24 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_ge(a, b) +pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_fabs(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] + +#[doc = "Floating-point absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(fabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2143,20 +2467,22 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_ge(a, b) +pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { + simd_fabs(a) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] + +#[doc = "Floating-point absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(fabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2166,20 +2492,24 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_ge(a, b) +pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_fabs(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2189,20 +2519,30 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_ge(a, b) +pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] + fn _vabs_s8(a: int8x8_t) -> int8x8_t; + } + _vabs_s8(a) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2212,20 +2552,32 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_ge(a, b) +pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] + fn _vabs_s8(a: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vabs_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2235,20 +2587,30 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_gt(a, b) +pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")] + fn _vabsq_s8(a: int8x16_t) -> int8x16_t; + } + _vabsq_s8(a) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2258,20 +2620,36 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_gt(a, b) +pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")] + fn _vabsq_s8(a: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vabsq_s8(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2281,20 +2659,30 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_gt(a, b) +pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")] + fn _vabs_s16(a: int16x4_t) -> int16x4_t; + } + _vabs_s16(a) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2304,20 +2692,32 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_gt(a, b) +pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")] + fn _vabs_s16(a: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vabs_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2327,20 +2727,30 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_gt(a, b) +pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")] + fn _vabsq_s16(a: int16x8_t) -> int16x8_t; + } + _vabsq_s16(a) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2350,20 +2760,32 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_gt(a, b) +pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")] + fn _vabsq_s16(a: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vabsq_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2373,20 +2795,30 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_gt(a, b) +pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")] + fn _vabs_s32(a: int32x2_t) -> int32x2_t; + } + _vabs_s32(a) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2396,20 +2828,32 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_gt(a, b) +pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")] + fn _vabs_s32(a: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vabs_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2419,20 +2863,30 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_gt(a, b) +pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")] + fn _vabsq_s32(a: int32x4_t) -> int32x4_t; + } + _vabsq_s32(a) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] + +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -2442,20 +2896,32 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_gt(a, b) +pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.abs.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")] + fn _vabsq_s32(a: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vabsq_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2465,20 +2931,22 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_gt(a, b) +pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_xor(a, b) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2488,20 +2956,25 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_gt(a, b) +pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2511,20 +2984,22 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_gt(a, b) +pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_xor(a, b) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2534,20 +3009,29 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_gt(a, b) +pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_xor(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2557,20 +3041,22 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_le(a, b) +pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_xor(a, b) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2580,20 +3066,25 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_le(a, b) +pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2603,20 +3094,22 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_le(a, b) +pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_xor(a, b) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2626,20 +3119,24 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_le(a, b) +pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2649,20 +3146,22 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_le(a, b) +pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + simd_xor(a, b) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2672,20 +3171,22 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_le(a, b) +pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_xor(a, b) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2695,20 +3196,24 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_le(a, b) +pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] + +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2718,112 +3223,304 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_le(a, b) +pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { + a ^ b } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] + +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesd))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] +pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesd" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] + fn _vaesdq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + } + _vaesdq_u8(data.as_signed(), key.as_signed()).as_unsigned() +} + +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesd))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_le(a, b) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesd" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] + fn _vaesdq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + } + let data: uint8x16_t = simd_shuffle!( + data, + data, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let key: uint8x16_t = simd_shuffle!( + key, + key, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let ret_val: uint8x16_t = _vaesdq_u8(data.as_signed(), key.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] + +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aese))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] +pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aese" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] + fn _vaeseq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + } + _vaeseq_u8(data.as_signed(), key.as_signed()).as_unsigned() +} + +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aese))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_le(a, b) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aese" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] + fn _vaeseq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + } + let data: uint8x16_t = simd_shuffle!( + data, + data, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let key: uint8x16_t = simd_shuffle!( + key, + key, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let ret_val: uint8x16_t = _vaeseq_u8(data.as_signed(), key.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] + +#[doc = "AES inverse mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesimc))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] +pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesimc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] + fn _vaesimcq_u8(data: int8x16_t) -> int8x16_t; + } + _vaesimcq_u8(data.as_signed()).as_unsigned() +} + +#[doc = "AES inverse mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesimc))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_le(a, b) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesimc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] + fn _vaesimcq_u8(data: int8x16_t) -> int8x16_t; + } + let data: uint8x16_t = simd_shuffle!( + data, + data, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let ret_val: uint8x16_t = _vaesimcq_u8(data.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] + +#[doc = "AES mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesmc))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] +pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesmc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] + fn _vaesmcq_u8(data: int8x16_t) -> int8x16_t; + } + _vaesmcq_u8(data.as_signed()).as_unsigned() +} + +#[doc = "AES mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesmc))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_le(a, b) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesmc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] + fn _vaesmcq_u8(data: int8x16_t) -> int8x16_t; + } + let data: uint8x16_t = simd_shuffle!( + data, + data, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + let ret_val: uint8x16_t = _vaesmcq_u8(data.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2833,20 +3530,22 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_le(a, b) +pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_and(a, b) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2856,20 +3555,25 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_le(a, b) +pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2879,28 +3583,22 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i8" - )] - fn _vcls_s8(a: int8x8_t) -> int8x8_t; - } - _vcls_s8(a) +pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_and(a, b) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2910,28 +3608,29 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v16i8" - )] - fn _vclsq_s8(a: int8x16_t) -> int8x16_t; - } - _vclsq_s8(a) +pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_and(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2941,28 +3640,22 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i16" - )] - fn _vcls_s16(a: int16x4_t) -> int16x4_t; - } - _vcls_s16(a) +pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_and(a, b) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2972,28 +3665,25 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i16" - )] - fn _vclsq_s16(a: int16x8_t) -> int16x8_t; - } - _vclsq_s16(a) +pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3003,28 +3693,22 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v2i32" - )] - fn _vcls_s32(a: int32x2_t) -> int32x2_t; - } - _vcls_s32(a) +pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_and(a, b) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3034,28 +3718,25 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i32" - )] - fn _vclsq_s32(a: int32x4_t) -> int32x4_t; - } - _vclsq_s32(a) +pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3065,20 +3746,22 @@ pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { - vcls_s8(transmute(a)) +pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_and(a, b) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3088,20 +3771,25 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { - vclsq_s8(transmute(a)) +pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3111,20 +3799,22 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { - vcls_s16(transmute(a)) +pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_and(a, b) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3134,20 +3824,24 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { - vclsq_s16(transmute(a)) +pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3157,20 +3851,22 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { - vcls_s32(transmute(a)) +pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_and(a, b) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3180,20 +3876,22 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { - vclsq_s32(transmute(a)) +pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_and(a, b) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3203,20 +3901,25 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_lt(a, b) +pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3226,20 +3929,22 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_lt(a, b) +pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_and(a, b) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3249,20 +3954,25 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_lt(a, b) +pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3272,20 +3982,22 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_lt(a, b) +pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_and(a, b) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3295,20 +4007,29 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_lt(a, b) +pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_and(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3318,20 +4039,22 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_lt(a, b) +pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_and(a, b) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3341,20 +4064,25 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_lt(a, b) +pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3364,20 +4092,22 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_lt(a, b) +pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_and(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3387,20 +4117,25 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_lt(a, b) +pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3410,20 +4145,22 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_lt(a, b) +pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_and(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3433,20 +4170,25 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_lt(a, b) +pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3456,20 +4198,22 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_lt(a, b) +pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_and(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3479,20 +4223,24 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_lt(a, b) +pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3502,20 +4250,22 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_lt(a, b) +pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_and(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3525,20 +4275,22 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { - vclz_s16_(a) +pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_and(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] + +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -3548,20 +4300,25 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { - vclzq_s16_(a) +pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_and(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3571,20 +4328,30 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { - vclz_s32_(a) +pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" + )] + fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + } + _vcage_f32(a, b).as_unsigned() } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3594,20 +4361,33 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { - vclzq_s32_(a) +pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" + )] + fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vcage_f32(a, b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3617,20 +4397,30 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { - vclz_s8_(a) +pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" + )] + fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + } + _vcageq_f32(a, b).as_unsigned() } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] + +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3640,20 +4430,33 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { - vclzq_s8_(a) +pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" + )] + fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcageq_f32(a, b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3663,20 +4466,30 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { - transmute(vclz_s16_(transmute(a))) +pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" + )] + fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + } + _vcagt_f32(a, b).as_unsigned() } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3686,20 +4499,33 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { - transmute(vclzq_s16_(transmute(a))) +pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" + )] + fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vcagt_f32(a, b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3709,20 +4535,30 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { - transmute(vclz_s32_(transmute(a))) +pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" + )] + fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + } + _vcagtq_f32(a, b).as_unsigned() } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] + +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3732,20 +4568,33 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { - transmute(vclzq_s32_(transmute(a))) +pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" + )] + fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcagtq_f32(a, b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3755,20 +4604,22 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { - transmute(vclz_s8_(transmute(a))) +pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + vcage_f32(b, a) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3778,20 +4629,25 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { - transmute(vclzq_s8_(transmute(a))) +pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = vcage_f32(b, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3801,20 +4657,22 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { - transmute(a) +pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + vcageq_f32(b, a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] + +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3824,20 +4682,25 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { - transmute(a) +pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vcageq_f32(b, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] -#[doc = "## Safety"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] +#[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3847,20 +4710,22 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { - transmute(a) +pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + vcagt_f32(b, a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3870,20 +4735,25 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { - transmute(a) +pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = vcagt_f32(b, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3893,20 +4763,22 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { - transmute(a) +pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + vcagtq_f32(b, a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] + +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3916,20 +4788,25 @@ pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { - transmute(a) +pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vcagtq_f32(b, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3939,20 +4816,22 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { - transmute(a) +pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_eq(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3962,20 +4841,25 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { - transmute(a) +pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3985,20 +4869,22 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { - transmute(a) +pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_eq(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] + +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4008,20 +4894,25 @@ pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { - transmute(a) +pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4031,20 +4922,22 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { - transmute(a) +pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_eq(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4054,20 +4947,25 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { - transmute(a) +pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4077,20 +4975,22 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { - simd_cast(a) +pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_eq(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4100,20 +5000,29 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { - simd_cast(a) +pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_eq(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4123,20 +5032,22 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { - simd_cast(a) +pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_eq(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4146,356 +5057,262 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { - simd_cast(a) +pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; - } - _vcvt_n_f32_s32(a, N) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_eq(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; - } - _vcvtq_n_f32_s32(a, N) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; - } - _vcvt_n_f32_s32(a, N) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_eq(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; - } - _vcvtq_n_f32_s32(a, N) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; - } - _vcvt_n_f32_u32(a.as_signed(), N) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_eq(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; - } - _vcvtq_n_f32_u32(a.as_signed(), N) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; - } - _vcvt_n_f32_u32(a.as_signed(), N) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_eq(a, b) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; - } - _vcvtq_n_f32_u32(a.as_signed(), N) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" - )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - _vcvt_n_s32_f32(a, N) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_eq(a, b) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" - )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - _vcvtq_n_s32_f32(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" - )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - _vcvt_n_s32_f32(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" - )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - _vcvtq_n_s32_f32(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" - )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - _vcvt_n_u32_f32(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" - )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - _vcvtq_n_u32_f32(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" - )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - _vcvt_n_u32_f32(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" - )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - _vcvtq_n_u32_f32(a, N).as_unsigned() -} -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4505,28 +5322,29 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v2i32.v2f32" - )] - fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvt_s32_f32(a) +pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_eq(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4536,28 +5354,22 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v4i32.v4f32" - )] - fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtq_s32_f32(a) +pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_eq(a, b) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4567,28 +5379,25 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v2i32.v2f32" - )] - fn _vcvt_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvt_u32_f32(a).as_unsigned() +pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -4598,274 +5407,235 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v4i32.v4f32" - )] - fn _vcvtq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtq_u32_f32(a).as_unsigned() +pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_eq(a, b) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(cmeq) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_s32(a, b, transmute(c)) +pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(cmeq) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_lane_s32( - a: int32x4_t, - b: int8x16_t, - c: int8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_s32(a, b, transmute(c)) +pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_eq(a, b) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(cmeq) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_lane_u32( - a: uint32x2_t, - b: uint8x8_t, - c: uint8x8_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_u32(a, b, transmute(c)) +pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(cmeq) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_lane_u32( - a: uint32x4_t, - b: uint8x16_t, - c: uint8x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_u32(a, b, transmute(c)) +pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_eq(a, b) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" - )] - fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - _vdot_s32(a, b, c) +pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" - )] - fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vdotq_s32(a, b, c) +pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + simd_eq(a, b) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" - )] - fn _vdot_u32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_eq(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" - )] - fn _vdotq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + simd_eq(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] + +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cmeq) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -4874,23 +5644,30 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_eq(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(fcmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -4899,23 +5676,23 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(fcmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -4924,23 +5701,26 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(fcmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -4949,23 +5729,23 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] + +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(fcmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -4974,23 +5754,26 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -4999,23 +5782,23 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5024,23 +5807,26 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5049,23 +5835,23 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5074,23 +5860,30 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_ge(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5099,27 +5892,23 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5128,27 +5917,26 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5157,27 +5945,23 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5186,27 +5970,26 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5215,27 +5998,23 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5244,27 +6023,26 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5273,30 +6051,23 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] + +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5305,30 +6076,26 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5337,30 +6104,23 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5369,23 +6129,26 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N == 0); - a +pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5394,23 +6157,23 @@ pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N == 0); - a +pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5419,23 +6182,30 @@ pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_ge(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5444,23 +6214,23 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5469,23 +6239,26 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5494,23 +6267,23 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5519,23 +6292,26 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5544,23 +6320,23 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5569,23 +6345,26 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5594,23 +6373,23 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_ge(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] + +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5619,23 +6398,26 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_ge(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(fcmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5644,27 +6426,23 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_gt(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(fcmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5673,27 +6451,26 @@ pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(fcmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5702,27 +6479,23 @@ pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_gt(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] + +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(fcmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5731,27 +6504,26 @@ pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5760,27 +6532,23 @@ pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_gt(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5789,27 +6557,26 @@ pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5818,30 +6585,23 @@ pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_gt(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5850,30 +6610,30 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); +pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_gt(a, b); simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5882,30 +6642,23 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_gt(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5914,23 +6667,26 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) +pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5939,23 +6695,23 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) +pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_gt(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5964,23 +6720,26 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5989,23 +6748,23 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_gt(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6014,23 +6773,26 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(cmgt) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6039,21 +6801,22 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_gt(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] + +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6063,20 +6826,25 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_xor(a, b) +pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6086,20 +6854,22 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_xor(a, b) +pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_gt(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6109,20 +6879,25 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_xor(a, b) +pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6132,20 +6907,22 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_xor(a, b) +pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_gt(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6155,20 +6932,29 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_xor(a, b) +pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_gt(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6178,20 +6964,22 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_xor(a, b) +pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_gt(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6201,20 +6989,25 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_xor(a, b) +pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6224,20 +7017,22 @@ pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_xor(a, b) +pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_gt(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6247,20 +7042,25 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_xor(a, b) +pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6270,20 +7070,22 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_xor(a, b) +pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_gt(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6293,20 +7095,25 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_xor(a, b) +pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6316,20 +7123,22 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_xor(a, b) +pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_gt(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] + +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -6339,20 +7148,25 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_xor(a, b) +pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_gt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -6362,20 +7176,22 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_xor(a, b) +pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_le(a, b) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -6385,20 +7201,25 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_xor(a, b) +pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -6408,22 +7229,23 @@ pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_xor(a, b) +pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] + +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(fcmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6432,27 +7254,26 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6461,27 +7282,23 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6490,27 +7307,26 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6519,33 +7335,23 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6554,33 +7360,30 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_le(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6589,33 +7392,23 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6624,33 +7417,26 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6659,33 +7445,23 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6694,33 +7470,26 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6729,29 +7498,23 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6760,29 +7523,26 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6791,29 +7551,23 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] + +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(cmge) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6822,29 +7576,26 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6853,29 +7604,23 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6884,29 +7629,26 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6915,27 +7657,23 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6944,27 +7682,30 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_le(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6973,101 +7714,23 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_le(a, b) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7076,101 +7739,26 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(cmhs) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7179,99 +7767,22 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_le(a, b) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -7281,25 +7792,25 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] - fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - _vfma_f32(b, c, a) +pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -7309,25 +7820,22 @@ pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] - fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - _vfmaq_f32(b, c, a) +pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_le(a, b) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -7337,20 +7845,25 @@ pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfma_f32(a, b, vdup_n_f32_vfp4(c)) +pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -7360,20 +7873,22 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) +pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_le(a, b) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] + +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -7383,21 +7898,25 @@ pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - let b: float32x2_t = simd_neg(b); - vfma_f32(a, b, c) +pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_le(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7407,21 +7926,30 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - let b: float32x4_t = simd_neg(b); - vfmaq_f32(a, b, c) +pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v8i8" + )] + fn _vcls_s8(a: int8x8_t) -> int8x8_t; + } + _vcls_s8(a) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7431,20 +7959,32 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfms_f32(a, b, vdup_n_f32_vfp4(c)) +pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v8i8" + )] + fn _vcls_s8(a: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vcls_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7454,20 +7994,30 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) +pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v16i8" + )] + fn _vclsq_s8(a: int8x16_t) -> int8x16_t; + } + _vclsq_s8(a) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7477,28 +8027,36 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { +pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i8" + link_name = "llvm.aarch64.neon.cls.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] - fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vclsq_s8(a: int8x16_t) -> int8x16_t; } - _vhadd_s8(a, b) + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vclsq_s8(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7508,28 +8066,30 @@ pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { +pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v16i8" + link_name = "llvm.aarch64.neon.cls.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] - fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vcls_s16(a: int16x4_t) -> int16x4_t; } - _vhaddq_s8(a, b) + _vcls_s16(a) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7539,28 +8099,32 @@ pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { +pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i16" + link_name = "llvm.aarch64.neon.cls.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] - fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vcls_s16(a: int16x4_t) -> int16x4_t; } - _vhadd_s16(a, b) + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vcls_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7570,28 +8134,30 @@ pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { +pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i16" + link_name = "llvm.aarch64.neon.cls.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] - fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vclsq_s16(a: int16x8_t) -> int16x8_t; } - _vhaddq_s16(a, b) + _vclsq_s16(a) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7601,28 +8167,32 @@ pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { +pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v2i32" + link_name = "llvm.aarch64.neon.cls.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] - fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vclsq_s16(a: int16x8_t) -> int16x8_t; } - _vhadd_s32(a, b) + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vclsq_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7632,28 +8202,30 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { +pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i32" + link_name = "llvm.aarch64.neon.cls.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] - fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vcls_s32(a: int32x2_t) -> int32x2_t; } - _vhaddq_s32(a, b) + _vcls_s32(a) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7663,28 +8235,32 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { +pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i8" + link_name = "llvm.aarch64.neon.cls.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] - fn _vhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vcls_s32(a: int32x2_t) -> int32x2_t; } - _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcls_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7694,28 +8270,30 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { +pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v16i8" + link_name = "llvm.aarch64.neon.cls.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] - fn _vhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vclsq_s32(a: int32x4_t) -> int32x4_t; } - _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vclsq_s32(a) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7725,28 +8303,32 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { +pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i16" + link_name = "llvm.aarch64.neon.cls.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] - fn _vhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vclsq_s32(a: int32x4_t) -> int32x4_t; } - _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vclsq_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7756,28 +8338,22 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] - fn _vhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { + vcls_s8(transmute(a)) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7787,28 +8363,24 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] - fn _vhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vcls_s8(transmute(a)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7818,28 +8390,22 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] - fn _vhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { + vclsq_s8(transmute(a)) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7849,28 +8415,28 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] - fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vhsub_s16(a, b) +pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vclsq_s8(transmute(a)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7880,28 +8446,22 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] - fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vhsubq_s16(a, b) +pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { + vcls_s16(transmute(a)) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7911,28 +8471,24 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] - fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vhsub_s32(a, b) +pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = vcls_s16(transmute(a)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7942,28 +8498,22 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] - fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vhsubq_s32(a, b) +pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { + vclsq_s16(transmute(a)) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -7973,28 +8523,24 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] - fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vhsub_s8(a, b) +pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vclsq_s16(transmute(a)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -8004,28 +8550,22 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] - fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vhsubq_s8(a, b) +pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { + vcls_s32(transmute(a)) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -8035,28 +8575,24 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] - fn _vhsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = vcls_s32(transmute(a)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -8066,28 +8602,22 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] - fn _vhsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { + vclsq_s32(transmute(a)) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] + +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -8097,28 +8627,24 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] - fn _vhsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = vclsq_s32(transmute(a)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8128,28 +8654,22 @@ pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] - fn _vhsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_lt(a, b) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8159,28 +8679,25 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] - fn _vhsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8190,28 +8707,22 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] - fn _vhsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] + +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8221,28 +8732,25 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0f32")] - fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; - } - _vld1_f32_x2(a) +pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8252,28 +8760,22 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0f32")] - fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; - } - _vld1_f32_x3(a) +pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8283,28 +8785,25 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0f32")] - fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; - } - _vld1_f32_x4(a) +pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8314,28 +8813,22 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0f32")] - fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; - } - _vld1q_f32_x2(a) +pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8345,28 +8838,29 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0f32")] - fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; - } - _vld1q_f32_x3(a) +pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_lt(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8376,28 +8870,22 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0f32")] - fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; - } - _vld1q_f32_x4(a) +pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8407,20 +8895,25 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { - transmute(vld1_s64_x2(transmute(a))) +pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8430,20 +8923,22 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { - transmute(vld1_s64_x3(transmute(a))) +pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8453,20 +8948,25 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { - transmute(vld1_s64_x4(transmute(a))) +pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8476,20 +8976,22 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { - transmute(vld1q_s64_x2(transmute(a))) +pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8499,20 +9001,25 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { - transmute(vld1q_s64_x3(transmute(a))) +pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8522,20 +9029,22 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { - transmute(vld1q_s64_x4(transmute(a))) +pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] + +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -8545,28 +9054,25 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0i8")] - fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; - } - _vld1_s8_x2(a) +pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8576,28 +9082,22 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0i8")] - fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; - } - _vld1_s8_x3(a) +pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8607,28 +9107,25 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0i8")] - fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; - } - _vld1_s8_x4(a) +pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8638,28 +9135,22 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0i8")] - fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; - } - _vld1q_s8_x2(a) +pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8669,28 +9160,29 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0i8")] - fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; - } - _vld1q_s8_x3(a) +pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_lt(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8700,28 +9192,22 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0i8")] - fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; - } - _vld1q_s8_x4(a) +pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8731,28 +9217,25 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0i16")] - fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; - } - _vld1_s16_x2(a) +pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8762,28 +9245,22 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0i16")] - fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; - } - _vld1_s16_x3(a) +pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8793,28 +9270,25 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0i16")] - fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; - } - _vld1_s16_x4(a) +pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8824,28 +9298,22 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0i16")] - fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; - } - _vld1q_s16_x2(a) +pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8855,28 +9323,25 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0i16")] - fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; - } - _vld1q_s16_x3(a) +pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8886,28 +9351,22 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0i16")] - fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; - } - _vld1q_s16_x4(a) +pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_lt(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] + +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -8917,28 +9376,25 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0i32")] - fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; - } - _vld1_s32_x2(a) +pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_lt(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -8948,28 +9404,30 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32" + link_name = "llvm.ctlz.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0i32")] - fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; + fn _vclz_s8(a: int8x8_t) -> int8x8_t; } - _vld1_s32_x3(a) + _vclz_s8(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -8979,28 +9437,32 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { +pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32" + link_name = "llvm.ctlz.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0i32")] - fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; + fn _vclz_s8(a: int8x8_t) -> int8x8_t; } - _vld1_s32_x4(a) + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vclz_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9010,28 +9472,30 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { +pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32" + link_name = "llvm.ctlz.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0i32")] - fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; + fn _vclzq_s8(a: int8x16_t) -> int8x16_t; } - _vld1q_s32_x2(a) + _vclzq_s8(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9041,28 +9505,36 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0i32" + link_name = "llvm.ctlz.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0i32")] - fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; + fn _vclzq_s8(a: int8x16_t) -> int8x16_t; } - _vld1q_s32_x3(a) + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vclzq_s8(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9072,28 +9544,30 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { +pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32" + link_name = "llvm.ctlz.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0i32")] - fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; + fn _vclz_s16(a: int16x4_t) -> int16x4_t; } - _vld1q_s32_x4(a) + _vclz_s16(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9103,28 +9577,32 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { +pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0i64" + link_name = "llvm.ctlz.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0i64")] - fn _vld1_s64_x2(a: *const i64) -> int64x1x2_t; + fn _vclz_s16(a: int16x4_t) -> int16x4_t; } - _vld1_s64_x2(a) + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vclz_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9134,28 +9612,30 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0i64" + link_name = "llvm.ctlz.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0i64")] - fn _vld1_s64_x3(a: *const i64) -> int64x1x3_t; + fn _vclzq_s16(a: int16x8_t) -> int16x8_t; } - _vld1_s64_x3(a) + _vclzq_s16(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9165,28 +9645,32 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { +pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0i64" + link_name = "llvm.ctlz.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0i64")] - fn _vld1_s64_x4(a: *const i64) -> int64x1x4_t; + fn _vclzq_s16(a: int16x8_t) -> int16x8_t; } - _vld1_s64_x4(a) + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vclzq_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9196,28 +9680,30 @@ pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { - unsafe extern "unadjusted" { +pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64" + link_name = "llvm.ctlz.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0i64")] - fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; + fn _vclz_s32(a: int32x2_t) -> int32x2_t; } - _vld1q_s64_x2(a) + _vclz_s32(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9227,28 +9713,32 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64" + link_name = "llvm.ctlz.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0i64")] - fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; + fn _vclz_s32(a: int32x2_t) -> int32x2_t; } - _vld1q_s64_x3(a) + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vclz_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9258,28 +9748,30 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { +pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64" + link_name = "llvm.ctlz.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0i64")] - fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; + fn _vclzq_s32(a: int32x4_t) -> int32x4_t; } - _vld1q_s64_x4(a) + _vclzq_s32(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9289,20 +9781,32 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { - transmute(vld1_s8_x2(transmute(a))) +pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v4i32" + )] + fn _vclzq_s32(a: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vclzq_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9312,20 +9816,22 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { - transmute(vld1_s8_x3(transmute(a))) +pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { + transmute(vclz_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9335,20 +9841,24 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { - transmute(vld1_s8_x4(transmute(a))) +pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vclz_s16(transmute(a))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9358,20 +9868,22 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { - transmute(vld1q_s8_x2(transmute(a))) +pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { + transmute(vclzq_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9381,20 +9893,24 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { - transmute(vld1q_s8_x3(transmute(a))) +pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(vclzq_s16(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9404,20 +9920,22 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { - transmute(vld1q_s8_x4(transmute(a))) +pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { + transmute(vclz_s32(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9427,20 +9945,24 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { - transmute(vld1_s16_x2(transmute(a))) +pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x2_t = transmute(vclz_s32(transmute(a))); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9450,20 +9972,22 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { - transmute(vld1_s16_x3(transmute(a))) +pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { + transmute(vclzq_s32(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9473,20 +9997,24 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { - transmute(vld1_s16_x4(transmute(a))) +pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(vclzq_s32(transmute(a))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9496,20 +10024,22 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { - transmute(vld1q_s16_x2(transmute(a))) +pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { + transmute(vclz_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9519,20 +10049,24 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { - transmute(vld1q_s16_x3(transmute(a))) +pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vclz_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9542,20 +10076,22 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { - transmute(vld1q_s16_x4(transmute(a))) +pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { + transmute(vclzq_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] + +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -9565,20 +10101,28 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { - transmute(vld1_s32_x2(transmute(a))) +pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vclzq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9588,20 +10132,30 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { - transmute(vld1_s32_x3(transmute(a))) +pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctpop.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] + fn _vcnt_s8(a: int8x8_t) -> int8x8_t; + } + _vcnt_s8(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9611,20 +10165,32 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { - transmute(vld1_s32_x4(transmute(a))) +pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctpop.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] + fn _vcnt_s8(a: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vcnt_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9634,20 +10200,30 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { - transmute(vld1q_s32_x2(transmute(a))) +pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctpop.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] + fn _vcntq_s8(a: int8x16_t) -> int8x16_t; + } + _vcntq_s8(a) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9657,20 +10233,36 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { - transmute(vld1q_s32_x3(transmute(a))) +pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctpop.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] + fn _vcntq_s8(a: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vcntq_s8(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9680,20 +10272,22 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { - transmute(vld1q_s32_x4(transmute(a))) +pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { + transmute(vcnt_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9703,20 +10297,24 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { - transmute(vld1_s64_x2(transmute(a))) +pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vcnt_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9726,20 +10324,22 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { - transmute(vld1_s64_x3(transmute(a))) +pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { + transmute(vcntq_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9749,20 +10349,28 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { - transmute(vld1_s64_x4(transmute(a))) +pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vcntq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9772,20 +10380,22 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { - transmute(vld1q_s64_x2(transmute(a))) +pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { + transmute(vcnt_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9795,20 +10405,24 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { - transmute(vld1q_s64_x3(transmute(a))) +pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vcnt_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9818,20 +10432,22 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { - transmute(vld1q_s64_x4(transmute(a))) +pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { + transmute(vcntq_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] + +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -9841,21 +10457,25 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { - transmute(vld1_s8_x2(transmute(a))) +pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vcntq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9864,21 +10484,19 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { - transmute(vld1_s8_x3(transmute(a))) +pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9887,21 +10505,22 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { - transmute(vld1_s8_x4(transmute(a))) +pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9910,21 +10529,19 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { - transmute(vld1q_s8_x2(transmute(a))) +pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9933,21 +10550,27 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { - transmute(vld1q_s8_x3(transmute(a))) +pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x16_t = + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9956,21 +10579,19 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { - transmute(vld1q_s8_x4(transmute(a))) +pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9979,21 +10600,22 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { - transmute(vld1_s16_x2(transmute(a))) +pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10002,21 +10624,19 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { - transmute(vld1_s16_x3(transmute(a))) +pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10025,21 +10645,22 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { - transmute(vld1_s16_x4(transmute(a))) +pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10048,21 +10669,19 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { - transmute(vld1q_s16_x2(transmute(a))) +pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 1]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10071,21 +10690,20 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { - transmute(vld1q_s16_x3(transmute(a))) +pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { + let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 1]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10094,301 +10712,138 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { - transmute(vld1q_s16_x4(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0i8")] - fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; - } - _vld2_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0i8")] - fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; - } - _vld2q_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0i8")] - fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; - } - _vld2_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0i8")] - fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; - } - _vld2q_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0i8")] - fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; - } - _vld2_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0i8")] - fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; - } - _vld2q_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0i8")] - fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; - } - _vld2_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0i8")] - fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; - } - _vld2q_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f32.p0f32" - )] - fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; - } - _vld2_dup_f32(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4f32.p0f32" - )] - fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; - } - _vld2q_dup_f32(a as _) +pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i8.p0i8" - )] - fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; - } - _vld2_dup_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v16i8.p0i8" - )] - fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; - } - _vld2q_dup_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i16.p0i16" - )] - fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; - } - _vld2_dup_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i16.p0i16" - )] - fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; - } - _vld2q_dup_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 1, 2, 3]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i32.p0i32" - )] - fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; - } - _vld2_dup_s32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i32.p0i32" - )] - fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; - } - _vld2q_dup_s32(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10397,56 +10852,41 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { - transmute(vld2_dup_s64(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0i8")] - fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; - } - _vld2_dup_s64(a as *const i8, 8) +pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 1]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v1i64.p0i64" - )] - fn _vld2_dup_s64(ptr: *const i64) -> int64x1x2_t; - } - _vld2_dup_s64(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { + let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 1]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10455,21 +10895,19 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { - transmute(vld2_dup_s64(transmute(a))) +pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10478,21 +10916,27 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { - transmute(vld2_dup_s8(transmute(a))) +pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x16_t = + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10501,21 +10945,19 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { - transmute(vld2q_dup_s8(transmute(a))) +pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10524,21 +10966,22 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { - transmute(vld2_dup_s16(transmute(a))) +pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10547,21 +10990,19 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { - transmute(vld2q_dup_s16(transmute(a))) +pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { + simd_shuffle!(a, b, [0, 1]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] + +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10570,20 +11011,23 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { - transmute(vld2_dup_s32(transmute(a))) +pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { + let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 1]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -10593,20 +11037,22 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { - transmute(vld2q_dup_s32(transmute(a))) +pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { + transmute(a) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -10616,20 +11062,23 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { - transmute(vld2_dup_s8(transmute(a))) +pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -10639,20 +11088,22 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { - transmute(vld2q_dup_s8(transmute(a))) +pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { + transmute(a) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -10662,20 +11113,23 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { - transmute(vld2_dup_s16(transmute(a))) +pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -10685,613 +11139,427 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { - transmute(vld2q_dup_s16(transmute(a))) +pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32.p0i8")] - fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; - } - _vld2_f32(a as *const i8, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32.p0i8")] - fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; - } - _vld2q_f32(a as *const i8, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8.p0i8")] - fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; - } - _vld2_s8(a as *const i8, 1) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8.p0i8")] - fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; - } - _vld2q_s8(a as *const i8, 1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16.p0i8")] - fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; - } - _vld2_s16(a as *const i8, 2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16.p0i8")] - fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; - } - _vld2q_s16(a as *const i8, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32.p0i8")] - fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; - } - _vld2_s32(a as *const i8, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32.p0i8")] - fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; - } - _vld2q_s32(a as *const i8, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f32.p0v2f32" - )] - fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; - } - _vld2_f32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4f32.p0v4f32" - )] - fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; - } - _vld2q_f32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i8.p0v8i8" - )] - fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; - } - _vld2_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v16i8.p0v16i8" - )] - fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; - } - _vld2q_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i16.p0v4i16" - )] - fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; - } - _vld2_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i16.p0v8i16" - )] - fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; - } - _vld2q_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i32.p0v2i32" - )] - fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; - } - _vld2_s32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i32.p0v4i32" - )] - fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; - } - _vld2q_s32(a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { + transmute(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8" - )] - fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; - } - _vld2_lane_f32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8" - )] - fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) - -> float32x4x2_t; - } - _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8" - )] - fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; - } - _vld2_lane_s8(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8" - )] - fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; - } - _vld2_lane_s16(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8" - )] - fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; - } - _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8" - )] - fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; - } - _vld2_lane_s32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8" - )] - fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; - } - _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")] - fn _vld2_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x2_t; - } - _vld2_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")] - fn _vld2q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x2_t; - } - _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")] - fn _vld2q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x2_t; - } - _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")] - fn _vld2q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x2_t; - } - _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")] - fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) - -> int8x8x2_t; - } - _vld2_lane_s8(a as _, b.0, b.1, LANE, 1) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")] - fn _vld2_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x2_t; - } - _vld2_lane_s16(a as _, b.0, b.1, LANE, 2) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")] - fn _vld2_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x2_t; - } - _vld2_lane_s32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(scvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11300,23 +11568,23 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { + simd_cast(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(scvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11325,23 +11593,25 @@ pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(scvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11350,23 +11620,23 @@ pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { + simd_cast(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(scvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11375,23 +11645,25 @@ pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld2_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ucvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11400,23 +11672,23 @@ pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2q_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { + simd_cast(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ucvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11425,23 +11697,25 @@ pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ucvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11450,23 +11724,23 @@ pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { + simd_cast(a) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ucvtf) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11475,693 +11749,792 @@ pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_cast(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { - transmute(vld2_s64(transmute(a))) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + _vcvt_n_f32_s32(a, N) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vcvt_n_f32_s32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64.p0i8")] - fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; +pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; } - _vld2_s64(a as *const i8, 8) + _vcvtq_n_f32_s32(a, N) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v1i64.p0v1i64" + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" )] - fn _vld2_s64(ptr: *const int64x1_t) -> int64x1x2_t; + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; } - _vld2_s64(a as _) + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcvtq_n_f32_s32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { - transmute(vld2_s64(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { - transmute(vld2_s8(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { - transmute(vld2q_s8(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { - transmute(vld2_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { - transmute(vld2q_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { - transmute(vld2_s32(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { - transmute(vld2q_s32(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { - transmute(vld2_s8(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { - transmute(vld2q_s8(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + _vcvt_n_f32_s32(a, N) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { - transmute(vld2_s16(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vcvt_n_f32_s32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { - transmute(vld2q_s16(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; + } + _vcvtq_n_f32_s32(a, N) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f32.p0f32" + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" )] - fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; } - _vld3_dup_f32(a as _) + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcvtq_n_f32_s32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f32.p0f32" + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" )] - fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; + fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; } - _vld3q_dup_f32(a as _) + _vcvt_n_f32_u32(a.as_signed(), N) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i8.p0i8" + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" )] - fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; + fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; } - _vld3_dup_s8(a as _) + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vcvt_n_f32_u32(a.as_signed(), N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v16i8.p0i8" + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" )] - fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; + fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; } - _vld3q_dup_s8(a as _) + _vcvtq_n_f32_u32(a.as_signed(), N) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i16.p0i16" + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" )] - fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; + fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; } - _vld3_dup_s16(a as _) + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcvtq_n_f32_u32(a.as_signed(), N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i16.p0i16" + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" )] - fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; + fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; } - _vld3q_dup_s16(a as _) + _vcvt_n_f32_u32(a.as_signed(), N) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i32.p0i32" + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" )] - fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; + fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; } - _vld3_dup_s32(a as _) + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vcvt_n_f32_u32(a.as_signed(), N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i32.p0i32" + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" )] - fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; + fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; } - _vld3q_dup_s32(a as _) + _vcvtq_n_f32_u32(a.as_signed(), N) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] + +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { +pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v1i64.p0i64" + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" )] - fn _vld3_dup_s64(ptr: *const i64) -> int64x1x3_t; + fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; } - _vld3_dup_s64(a as _) + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vcvtq_n_f32_u32(a.as_signed(), N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0i8")] - fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; +pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; } - _vld3_dup_f32(a as *const i8, 4) + _vcvt_n_s32_f32(a, N) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0i8")] - fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; +pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; } - _vld3q_dup_f32(a as *const i8, 4) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcvt_n_s32_f32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0i8")] - fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - _vld3_dup_s8(a as *const i8, 1) +pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + _vcvtq_n_s32_f32(a, N) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0i8")] - fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; +pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; } - _vld3q_dup_s8(a as *const i8, 1) + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vcvtq_n_s32_f32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + _vcvt_n_s32_f32(a, N) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcvt_n_s32_f32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + _vcvtq_n_s32_f32(a, N) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vcvtq_n_s32_f32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0i8")] - fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; +pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; } - _vld3_dup_s16(a as *const i8, 2) + _vcvt_n_u32_f32(a, N).as_unsigned() } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0i8")] - fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; +pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; } - _vld3q_dup_s16(a as *const i8, 2) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vcvt_n_u32_f32(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0i8")] - fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; +pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; } - _vld3_dup_s32(a as *const i8, 4) + _vcvtq_n_u32_f32(a, N).as_unsigned() } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0i8")] - fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; +pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; } - _vld3q_dup_s32(a as *const i8, 4) + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcvtq_n_u32_f32(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { - transmute(vld3_dup_s64(transmute(a))) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + _vcvt_n_u32_f32(a, N).as_unsigned() } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0i8")] - fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; } - _vld3_dup_s64(a as *const i8, 8) + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vcvt_n_u32_f32(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { - transmute(vld3_dup_s64(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + _vcvtq_n_u32_f32(a, N).as_unsigned() } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] + +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcvtq_n_u32_f32(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -12171,20 +12544,30 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { - transmute(vld3_dup_s8(transmute(a))) +pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v2i32.v2f32" + )] + fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvt_s32_f32(a) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] + +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -12194,20 +12577,32 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_dup_s8(transmute(a))) +pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v2i32.v2f32" + )] + fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vcvt_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] + +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -12217,20 +12612,30 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_dup_s16(transmute(a))) +pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v4i32.v4f32" + )] + fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtq_s32_f32(a) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] + +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -12240,20 +12645,32 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_dup_s16(transmute(a))) +pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v4i32.v4f32" + )] + fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vcvtq_s32_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] + +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzu) )] #[cfg_attr( not(target_arch = "arm"), @@ -12263,20 +12680,30 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_dup_s32(transmute(a))) +pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v2i32.v2f32" + )] + fn _vcvt_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvt_u32_f32(a).as_unsigned() } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] + +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzu) )] #[cfg_attr( not(target_arch = "arm"), @@ -12286,20 +12713,32 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_dup_s32(transmute(a))) +pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v2i32.v2f32" + )] + fn _vcvt_u32_f32(a: float32x2_t) -> int32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vcvt_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] + +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzu) )] #[cfg_attr( not(target_arch = "arm"), @@ -12309,20 +12748,30 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { - transmute(vld3_dup_s8(transmute(a))) +pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v4i32.v4f32" + )] + fn _vcvtq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtq_u32_f32(a).as_unsigned() } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] + +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(fcvtzu) )] #[cfg_attr( not(target_arch = "arm"), @@ -12332,687 +12781,613 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { - transmute(vld3q_dup_s8(transmute(a))) +pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v4i32.v4f32" + )] + fn _vcvtq_u32_f32(a: float32x4_t) -> int32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vcvtq_u32_f32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(sdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { - transmute(vld3_dup_s16(transmute(a))) +pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_s32(a, b, transmute(c)) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(sdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { - transmute(vld3q_dup_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] +pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int32x2_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f32.p0v2f32" - )] - fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; - } - _vld3_f32(a as _) +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_lane_s32( + a: int32x4_t, + b: int8x16_t, + c: int8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_s32(a, b, transmute(c)) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_lane_s32( + a: int32x4_t, + b: int8x16_t, + c: int8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int32x2_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vdotq_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdot_lane_u32( + a: uint32x2_t, + b: uint8x8_t, + c: uint8x8_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_u32(a, b, transmute(c)) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdot_lane_u32( + a: uint32x2_t, + b: uint8x8_t, + c: uint8x8_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint32x2_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_lane_u32( + a: uint32x4_t, + b: uint8x16_t, + c: uint8x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_u32(a, b, transmute(c)) +} + +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_lane_u32( + a: uint32x4_t, + b: uint8x16_t, + c: uint8x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint32x2_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: uint32x4_t = vdotq_u32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4f32.p0v4f32" + link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" )] - fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; + fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; } - _vld3q_f32(a as _) + _vdot_s32(a, b, c) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i8.p0v8i8" + link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" )] - fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; + fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; } - _vld3_s8(a as _) + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x2_t = _vdot_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v16i8.p0v16i8" + link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" )] - fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; + fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; } - _vld3q_s8(a as _) + _vdotq_s32(a, b, c) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i16.p0v4i16" + link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" )] - fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; + fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; } - _vld3_s16(a as _) + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int32x4_t = _vdotq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i16.p0v8i16" + link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" )] - fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; + fn _vdot_u32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; } - _vld3q_s16(a as _) + _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i32.p0v2i32" + link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" )] - fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; + fn _vdot_u32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; } - _vld3_s32(a as _) + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x2_t = _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i32.p0v4i32" + link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" )] - fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; + fn _vdotq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; } - _vld3q_s32(a as _) + _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] + +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0i8")] - fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" + )] + fn _vdotq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; } - _vld3_f32(a as *const i8, 4) + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint32x4_t = _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0i8")] - fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; - } - _vld3q_f32(a as *const i8, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 1) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0i8")] - fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - _vld3_s8(a as *const i8, 1) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0i8")] - fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; - } - _vld3q_s8(a as *const i8, 1) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0i8")] - fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; - } - _vld3_s16(a as *const i8, 2) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0i8")] - fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - _vld3q_s16(a as *const i8, 2) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0i8")] - fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; - } - _vld3_s32(a as *const i8, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0i8")] - fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; - } - _vld3q_s32(a as *const i8, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8" - )] - fn _vld3_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x3_t; - } - _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8" - )] - fn _vld3q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x3_t; - } - _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")] - fn _vld3_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x3_t; - } - _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8" - )] - fn _vld3_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x3_t; - } - _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8" - )] - fn _vld3_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x3_t; - } - _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8" - )] - fn _vld3q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x3_t; - } - _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8" - )] - fn _vld3_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x3_t; - } - _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8" - )] - fn _vld3q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x3_t; - } - _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")] - fn _vld3_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x3_t; - } - _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")] - fn _vld3_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x3_t; - } - _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")] - fn _vld3q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x3_t; - } - _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")] - fn _vld3_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x3_t; - } - _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")] - fn _vld3q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x3_t; - } - _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13021,23 +13396,27 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13046,23 +13425,25 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13071,23 +13452,27 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13096,23 +13481,25 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13121,23 +13508,27 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3q_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13146,23 +13537,25 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13171,23 +13564,27 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(N, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(dup, N = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13196,22 +13593,25 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13220,56 +13620,54 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { - transmute(vld3_s64(transmute(a))) +pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(N, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1i64.p0v1i64" - )] - fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; - } - _vld3_s64(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0i8")] - fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; - } - _vld3_s64(a as *const i8, 8) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 1) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13278,21 +13676,27 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { - transmute(vld3_s64(transmute(a))) +pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13301,21 +13705,25 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { - transmute(vld3_s8(transmute(a))) +pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13324,21 +13732,27 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_s8(transmute(a))) +pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13347,21 +13761,25 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_s16(transmute(a))) +pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13370,21 +13788,27 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_s16(transmute(a))) +pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13393,21 +13817,25 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_s32(transmute(a))) +pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13416,21 +13844,27 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_s32(transmute(a))) +pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13439,21 +13873,29 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { - transmute(vld3_s8(transmute(a))) +pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13462,21 +13904,31 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { - transmute(vld3q_s8(transmute(a))) +pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 2); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: poly16x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13485,21 +13937,29 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { - transmute(vld3_s16(transmute(a))) +pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13508,345 +13968,361 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { - transmute(vld3q_s16(transmute(a))) +pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(N, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")] - fn _vld3q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x3_t; - } - _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0i8")] - fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - _vld4_dup_f32(a as *const i8, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0i8")] - fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; - } - _vld4q_dup_f32(a as *const i8, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0i8")] - fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; - } - _vld4_dup_s8(a as *const i8, 1) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0i8")] - fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - _vld4q_dup_s8(a as *const i8, 1) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0i8")] - fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - _vld4_dup_s16(a as *const i8, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0i8")] - fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - _vld4q_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0i8")] - fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; - } - _vld4_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0i8")] - fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; - } - _vld4q_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32" - )] - fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; - } - _vld4_dup_f32(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32" - )] - fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; - } - _vld4q_dup_f32(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8" - )] - fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; - } - _vld4_dup_s8(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8" - )] - fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; - } - _vld4q_dup_s8(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16" - )] - fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; - } - _vld4_dup_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16" - )] - fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; - } - _vld4q_dup_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32" - )] - fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; - } - _vld4_dup_s32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32" - )] - fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; - } - _vld4q_dup_s32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64" - )] - fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; - } - _vld4_dup_s64(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13855,37 +14331,32 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(nop))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0i8")] - fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; - } - _vld4_dup_s64(a as *const i8, 8) +pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13894,21 +14365,38 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) +pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x16_t = simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13917,21 +14405,32 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_dup_s8(transmute(a))) +pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13940,21 +14439,37 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_dup_s8(transmute(a))) +pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(nop, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13963,21 +14478,24 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) +pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N == 0); + a } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(nop, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13986,21 +14504,25 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) +pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { + static_assert!(N == 0); + a } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14009,21 +14531,25 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_dup_s32(transmute(a))) +pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14032,21 +14558,27 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_dup_s32(transmute(a))) +pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(N, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14055,21 +14587,25 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_dup_s8(transmute(a))) +pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14078,21 +14614,27 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_dup_s8(transmute(a))) +pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14101,21 +14643,25 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) +pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14124,680 +14670,458 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) +pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f32.p0v2f32" - )] - fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; - } - _vld4_f32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f32.p0v4f32" - )] - fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; - } - _vld4q_f32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i8.p0v8i8" - )] - fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; - } - _vld4_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v16i8.p0v16i8" - )] - fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; - } - _vld4q_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i16.p0v4i16" - )] - fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; - } - _vld4_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i16.p0v8i16" - )] - fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; - } - _vld4q_s16(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i32.p0v2i32" - )] - fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; - } - _vld4_s32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i32.p0v4i32" - )] - fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; - } - _vld4q_s32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 3); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0i8")] - fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - _vld4_f32(a as *const i8, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0i8")] - fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; - } - _vld4q_f32(a as *const i8, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(N, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0i8")] - fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; - } - _vld4_s8(a as *const i8, 1) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0i8")] - fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - _vld4q_s8(a as *const i8, 1) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0i8")] - fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - _vld4_s16(a as *const i8, 2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0i8")] - fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - _vld4q_s16(a as *const i8, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0i8")] - fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; - } - _vld4_s32(a as *const i8, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 4) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0i8")] - fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; - } - _vld4q_s32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8" - )] - fn _vld4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x4_t; - } - _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8" - )] - fn _vld4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x4_t; - } - _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8" - )] - fn _vld4_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x4_t; - } - _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8" - )] - fn _vld4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x4_t; - } - _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8" - )] - fn _vld4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x4_t; - } - _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8" - )] - fn _vld4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x4_t; - } - _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8" - )] - fn _vld4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x4_t; - } - _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")] - fn _vld4_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x4_t; - } - _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")] - fn _vld4q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x4_t; - } - _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")] - fn _vld4_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x4_t; - } - _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")] - fn _vld4_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x4_t; - } - _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")] - fn _vld4q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x4_t; - } - _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")] - fn _vld4_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x4_t; - } - _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")] - fn _vld4q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x4_t; - } - _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 4) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14806,23 +15130,31 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 4) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14831,23 +15163,29 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 4) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14856,23 +15194,31 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 8) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14881,23 +15227,29 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 8) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14906,23 +15258,31 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4q_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 4); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 8) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14931,23 +15291,29 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 8) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14956,23 +15322,31 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { + static_assert_uimm_bits!(N, 4); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(dup, N = 8) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14981,22 +15355,29 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15005,56 +15386,65 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_s64(transmute(a))) +pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 4); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x8_t = simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1i64.p0v1i64" - )] - fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; - } - _vld4_s64(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0i8")] - fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; - } - _vld4_s64(a as *const i8, 8) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup, N = 8) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15063,21 +15453,38 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_s64(transmute(a))) +pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15086,21 +15493,32 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_s8(transmute(a))) +pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15109,21 +15527,38 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_s8(transmute(a))) +pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15132,21 +15567,32 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_s16(transmute(a))) +pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15155,21 +15601,38 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_s16(transmute(a))) +pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15178,21 +15641,25 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_s32(transmute(a))) +pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15201,21 +15668,26 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_s32(transmute(a))) +pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { + static_assert_uimm_bits!(N, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15224,21 +15696,25 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_s8(transmute(a))) +pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15247,21 +15723,26 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_s8(transmute(a))) +pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(dup, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15270,21 +15751,25 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_s16(transmute(a))) +pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(dup, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15293,21 +15778,26 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_s16(transmute(a))) +pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { + static_assert!(N == 0); + let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) + assert_instr(dup, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15316,29 +15806,25 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f32" - )] - fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmax_f32(a, b) +pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) + assert_instr(dup, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15347,29 +15833,26 @@ pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v4f32" - )] - fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vmaxq_f32(a, b) +pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { + static_assert!(N == 0); + let ret_val: uint64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15378,29 +15861,25 @@ pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i8" - )] - fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmax_s8(a, b) +pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15409,29 +15888,27 @@ pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v16i8" - )] - fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vmaxq_s8(a, b) +pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15440,29 +15917,25 @@ pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i16" - )] - fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmax_s16(a, b) +pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] + +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15471,28 +15944,25 @@ pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i16" - )] - fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vmaxq_s16(a, b) +pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15502,28 +15972,22 @@ pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v2i32" - )] - fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmax_s32(a, b) +pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_xor(a, b) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15533,28 +15997,25 @@ pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i32" - )] - fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vmaxq_s32(a, b) +pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15564,28 +16025,22 @@ pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i8" - )] - fn _vmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_xor(a, b) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15595,28 +16050,29 @@ pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v16i8" - )] - fn _vmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_xor(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15626,28 +16082,22 @@ pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i16" - )] - fn _vmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_xor(a, b) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15657,28 +16107,25 @@ pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i16" - )] - fn _vmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15688,28 +16135,22 @@ pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v2i32" - )] - fn _vmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_xor(a, b) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15719,28 +16160,25 @@ pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i32" - )] - fn _vmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15750,28 +16188,22 @@ pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f32" - )] - fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmaxnm_f32(a, b) +pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_xor(a, b) } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15781,28 +16213,25 @@ pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f32" - )] - fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vmaxnmq_f32(a, b) +pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15812,28 +16241,22 @@ pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f32" - )] - fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmin_f32(a, b) +pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_xor(a, b) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15843,28 +16266,24 @@ pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v4f32" - )] - fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vminq_f32(a, b) +pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15874,28 +16293,22 @@ pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i8" - )] - fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmin_s8(a, b) +pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_xor(a, b) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15905,28 +16318,22 @@ pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v16i8" - )] - fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vminq_s8(a, b) +pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_xor(a, b) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15936,28 +16343,25 @@ pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i16" - )] - fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmin_s16(a, b) +pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15967,28 +16371,22 @@ pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i16" - )] - fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vminq_s16(a, b) +pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_xor(a, b) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -15998,28 +16396,25 @@ pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v2i32" - )] - fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmin_s32(a, b) +pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16029,28 +16424,22 @@ pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i32" - )] - fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vminq_s32(a, b) +pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_xor(a, b) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16060,28 +16449,29 @@ pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i8" - )] - fn _vmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_xor(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16091,28 +16481,22 @@ pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v16i8" - )] - fn _vminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_xor(a, b) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16122,28 +16506,25 @@ pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i16" - )] - fn _vmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16153,28 +16534,22 @@ pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i16" - )] - fn _vminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_xor(a, b) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16184,28 +16559,25 @@ pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v2i32" - )] - fn _vmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16215,28 +16587,22 @@ pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i32" - )] - fn _vminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_xor(a, b) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16246,28 +16612,25 @@ pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f32" - )] - fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vminnm_f32(a, b) +pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16277,28 +16640,22 @@ pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f32" - )] - fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vminnmq_f32(a, b) +pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_xor(a, b) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16308,20 +16665,24 @@ pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -16331,22 +16692,23 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_xor(a, b) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(eor) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16355,27 +16717,23 @@ pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_xor(a, b) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] + +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(eor) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16384,27 +16742,27 @@ pub unsafe fn vmla_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_xor(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16413,31 +16771,29 @@ pub unsafe fn vmla_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16446,31 +16802,32 @@ pub unsafe fn vmlaq_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + static_assert_uimm_bits!(N, 1); + let ret_val: float32x2_t = match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16479,31 +16836,29 @@ pub unsafe fn vmlaq_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16512,31 +16867,32 @@ pub unsafe fn vmla_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmla_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + static_assert_uimm_bits!(N, 1); + let ret_val: int32x2_t = match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16545,31 +16901,29 @@ pub unsafe fn vmla_lane_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16578,31 +16932,32 @@ pub unsafe fn vmla_laneq_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmla_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + static_assert_uimm_bits!(N, 1); + let ret_val: uint32x2_t = match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16611,44 +16966,35 @@ pub unsafe fn vmla_laneq_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16657,44 +17003,38 @@ pub unsafe fn vmlaq_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(N, 3); + let ret_val: int8x8_t = match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16703,44 +17043,35 @@ pub unsafe fn vmlaq_lane_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16749,44 +17080,38 @@ pub unsafe fn vmlaq_laneq_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlaq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(N, 3); + let ret_val: int16x8_t = match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16795,27 +17120,35 @@ pub unsafe fn vmlaq_laneq_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16824,27 +17157,38 @@ pub unsafe fn vmla_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(N, 3); + let ret_val: uint8x8_t = match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16853,27 +17197,35 @@ pub unsafe fn vmla_lane_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16882,27 +17234,38 @@ pub unsafe fn vmla_laneq_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(N, 3); + let ret_val: uint16x8_t = match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16911,31 +17274,35 @@ pub unsafe fn vmla_laneq_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlaq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16944,31 +17311,38 @@ pub unsafe fn vmlaq_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(N, 3); + let ret_val: poly8x8_t = match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16977,31 +17351,35 @@ pub unsafe fn vmlaq_lane_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ext, N = 7) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17010,30 +17388,38 @@ pub unsafe fn vmlaq_laneq_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(N, 3); + let ret_val: poly16x8_t = match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17042,21 +17428,31 @@ pub unsafe fn vmlaq_laneq_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vmla_f32(a, b, vdup_n_f32(c)) +pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17065,21 +17461,34 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vmlaq_f32(a, b, vdupq_n_f32(c)) +pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + static_assert_uimm_bits!(N, 2); + let ret_val: float32x4_t = match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17088,21 +17497,31 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - vmla_s16(a, b, vdup_n_s16(c)) +pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17111,21 +17530,34 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - vmlaq_s16(a, b, vdupq_n_s16(c)) +pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + static_assert_uimm_bits!(N, 2); + let ret_val: int16x4_t = match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17134,21 +17566,31 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - vmla_u16(a, b, vdup_n_u16(c)) +pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17157,21 +17599,34 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - vmlaq_u16(a, b, vdupq_n_u16(c)) +pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + static_assert_uimm_bits!(N, 2); + let ret_val: int32x4_t = match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17180,21 +17635,31 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - vmla_s32(a, b, vdup_n_s32(c)) +pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17203,21 +17668,34 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - vmlaq_s32(a, b, vdupq_n_s32(c)) +pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + static_assert_uimm_bits!(N, 2); + let ret_val: uint16x4_t = match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17226,21 +17704,31 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - vmla_u32(a, b, vdup_n_u32(c)) +pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17249,21 +17737,34 @@ pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - vmlaq_u32(a, b, vdupq_n_u32(c)) +pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + static_assert_uimm_bits!(N, 2); + let ret_val: uint32x4_t = match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17272,21 +17773,31 @@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17295,21 +17806,34 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + static_assert_uimm_bits!(N, 2); + let ret_val: poly16x4_t = match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17318,21 +17842,29 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17341,21 +17873,32 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + static_assert_uimm_bits!(N, 1); + let ret_val: int64x2_t = match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17364,21 +17907,29 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17387,21 +17938,32 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + static_assert_uimm_bits!(N, 1); + let ret_val: uint64x2_t = match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + }; + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17410,21 +17972,103 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17433,21 +18077,110 @@ pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + static_assert_uimm_bits!(N, 4); + let ret_val: int8x16_t = match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17456,21 +18189,103 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17479,21 +18294,110 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + static_assert_uimm_bits!(N, 4); + let ret_val: uint8x16_t = match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17502,21 +18406,103 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + } } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] + +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17525,22 +18511,109 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - simd_add(a, simd_mul(b, c)) +pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + static_assert_uimm_bits!(N, 4); + let ret_val: poly8x16_t = match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + }; + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17549,31 +18622,28 @@ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] + fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + _vfma_f32(b, c, a) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17582,31 +18652,32 @@ pub unsafe fn vmlal_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] +pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] + fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = _vfma_f32(b, c, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17615,27 +18686,28 @@ pub unsafe fn vmlal_laneq_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] + fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + _vfmaq_f32(b, c, a) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17644,27 +18716,32 @@ pub unsafe fn vmlal_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] + fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vfmaq_f32(b, c, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17673,31 +18750,23 @@ pub unsafe fn vmlal_laneq_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfma_f32(a, b, vdup_n_f32_vfp4(c)) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17706,31 +18775,26 @@ pub unsafe fn vmlal_lane_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32_vfp4(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17739,27 +18803,23 @@ pub unsafe fn vmlal_laneq_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] + +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(fmla) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17768,25 +18828,25 @@ pub unsafe fn vmlal_lane_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] + +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17796,20 +18856,23 @@ pub unsafe fn vmlal_laneq_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlal_s16(a, b, vdup_n_s16(c)) +pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + let b: float32x2_t = simd_neg(b); + vfma_f32(a, b, c) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] + +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17819,20 +18882,27 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlal_s32(a, b, vdup_n_s32(c)) +pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let b: float32x2_t = simd_neg(b); + let ret_val: float32x2_t = vfma_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] + +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17842,20 +18912,23 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlal_u16(a, b, vdup_n_u16(c)) +pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + let b: float32x4_t = simd_neg(b); + vfmaq_f32(a, b, c) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] + +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17865,20 +18938,27 @@ pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlal_u32(a, b, vdup_n_u32(c)) +pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let b: float32x4_t = simd_neg(b); + let ret_val: float32x4_t = vfmaq_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] + +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17888,20 +18968,22 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - simd_add(a, vmull_s8(b, c)) +pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfms_f32(a, b, vdup_n_f32_vfp4(c)) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] + +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17911,20 +18993,25 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - simd_add(a, vmull_s16(b, c)) +pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32_vfp4(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] + +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17934,20 +19021,22 @@ pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - simd_add(a, vmull_s32(b, c)) +pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] + +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -17957,20 +19046,25 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - simd_add(a, vmull_u8(b, c)) +pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -17980,20 +19074,30 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - simd_add(a, vmull_u16(b, c)) +pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] + fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhadd_s8(a, b) } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -18003,20 +19107,33 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - simd_add(a, vmull_u32(b, c)) +pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] + fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vhadd_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -18026,20 +19143,30 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] + fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhaddq_s8(a, b) } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -18049,22 +19176,38 @@ pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] + fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vhaddq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18073,27 +19216,31 @@ pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] + fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhadd_s16(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18102,27 +19249,34 @@ pub unsafe fn vmls_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] + fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vhadd_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18131,31 +19285,31 @@ pub unsafe fn vmls_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] + fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhaddq_s16(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18164,31 +19318,34 @@ pub unsafe fn vmlsq_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] + fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vhaddq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18197,31 +19354,31 @@ pub unsafe fn vmlsq_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] + fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhadd_s32(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18230,31 +19387,34 @@ pub unsafe fn vmls_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] +pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] + fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vhadd_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18263,31 +19423,31 @@ pub unsafe fn vmls_lane_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] + fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhaddq_s32(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(shadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18296,31 +19456,34 @@ pub unsafe fn vmls_laneq_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] + fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vhaddq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18329,44 +19492,31 @@ pub unsafe fn vmls_laneq_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] + fn _vhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18375,44 +19525,34 @@ pub unsafe fn vmlsq_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] + fn _vhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18421,44 +19561,31 @@ pub unsafe fn vmlsq_lane_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlsq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] + fn _vhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18467,44 +19594,38 @@ pub unsafe fn vmlsq_laneq_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlsq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), +pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] + fn _vhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18513,27 +19634,31 @@ pub unsafe fn vmlsq_laneq_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] + fn _vhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18542,27 +19667,34 @@ pub unsafe fn vmls_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] + fn _vhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18571,27 +19703,31 @@ pub unsafe fn vmls_lane_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] + fn _vhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18600,27 +19736,34 @@ pub unsafe fn vmls_laneq_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] + fn _vhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18629,31 +19772,31 @@ pub unsafe fn vmls_laneq_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] + fn _vhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18662,31 +19805,34 @@ pub unsafe fn vmlsq_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] + fn _vhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18695,31 +19841,31 @@ pub unsafe fn vmlsq_lane_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] + fn _vhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] + +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(uhadd) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18728,29 +19874,33 @@ pub unsafe fn vmlsq_laneq_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] + fn _vhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18760,20 +19910,30 @@ pub unsafe fn vmlsq_laneq_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vmls_f32(a, b, vdup_n_f32(c)) +pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] + fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhsub_s16(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18783,20 +19943,33 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vmlsq_f32(a, b, vdupq_n_f32(c)) +pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] + fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vhsub_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18806,20 +19979,30 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - vmls_s16(a, b, vdup_n_s16(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] +pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] + fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhsubq_s16(a, b) +} + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18829,20 +20012,33 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - vmlsq_s16(a, b, vdupq_n_s16(c)) +pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] + fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vhsubq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18852,20 +20048,30 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - vmls_u16(a, b, vdup_n_u16(c)) +pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] + fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhsub_s32(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18875,20 +20081,33 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - vmlsq_u16(a, b, vdupq_n_u16(c)) +pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] + fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vhsub_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18898,20 +20117,30 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - vmls_s32(a, b, vdup_n_s32(c)) +pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] + fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhsubq_s32(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18921,20 +20150,33 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - vmlsq_s32(a, b, vdupq_n_s32(c)) +pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] + fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vhsubq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18944,20 +20186,30 @@ pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - vmls_u32(a, b, vdup_n_u32(c)) +pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] + fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhsub_s8(a, b) } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18967,20 +20219,33 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - vmlsq_u32(a, b, vdupq_n_u32(c)) +pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] + fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vhsub_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -18990,20 +20255,30 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] + fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhsubq_s8(a, b) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19013,20 +20288,37 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] + fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vhsubq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19036,20 +20328,30 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] + fn _vhsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19059,20 +20361,33 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] + fn _vhsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19082,20 +20397,30 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] + fn _vhsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19105,20 +20430,37 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] + fn _vhsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19128,20 +20470,30 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] + fn _vhsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19151,20 +20503,33 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] + fn _vhsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19174,20 +20539,30 @@ pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] + fn _vhsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19197,20 +20572,33 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] + fn _vhsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19220,20 +20608,30 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] + fn _vhsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -19243,22 +20641,34 @@ pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - simd_sub(a, simd_mul(b, c)) +pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] + fn _vhsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(uhsub) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19267,31 +20677,31 @@ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] + fn _vhsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] + +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(uhsub) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19300,122 +20710,564 @@ pub unsafe fn vmlsl_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] + fn _vhsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + transmute(vld1_v2f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + let ret_val: float32x2_t = transmute(vld1_v2f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { + transmute(vld1q_v4f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { + let ret_val: float32x4_t = transmute(vld1q_v4f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { + transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { + transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { + let ret_val: uint8x16_t = transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { + transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { + transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { + let ret_val: uint16x8_t = transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { + transmute(vld1_v2i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(vld1_v2i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { + transmute(vld1q_v4i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { + let ret_val: uint32x4_t = transmute(vld1q_v4i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { + transmute(vld1_v1i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + let ret_val: uint64x2_t = transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + let ret_val: poly8x16_t = transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { + transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { + let ret_val: poly16x8_t = transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { + transmute(vld1_v1i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { + transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { + let ret_val: poly64x2_t = transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19424,31 +21276,31 @@ pub unsafe fn vmlsl_lane_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0f32")] + fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; + } + _vld1_f32_x2(a) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19457,27 +21309,34 @@ pub unsafe fn vmlsl_laneq_u16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0f32")] + fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; + } + let mut ret_val: float32x2x2_t = _vld1_f32_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19486,25 +21345,30 @@ pub unsafe fn vmlsl_lane_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0f32")] + fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; + } + _vld1_f32_x3(a) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19514,20 +21378,34 @@ pub unsafe fn vmlsl_laneq_u32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlsl_s16(a, b, vdup_n_s16(c)) +pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0f32")] + fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; + } + let mut ret_val: float32x2x3_t = _vld1_f32_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19537,20 +21415,30 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlsl_s32(a, b, vdup_n_s32(c)) +pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0f32")] + fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; + } + _vld1_f32_x4(a) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19560,20 +21448,35 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlsl_u16(a, b, vdup_n_u16(c)) +pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0f32")] + fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; + } + let mut ret_val: float32x2x4_t = _vld1_f32_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19583,20 +21486,30 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlsl_u32(a, b, vdup_n_u32(c)) +pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0f32")] + fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; + } + _vld1q_f32_x2(a) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19606,20 +21519,33 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - simd_sub(a, vmull_s8(b, c)) +pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0f32")] + fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; + } + let mut ret_val: float32x4x2_t = _vld1q_f32_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19629,20 +21555,30 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - simd_sub(a, vmull_s16(b, c)) +pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0f32")] + fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; + } + _vld1q_f32_x3(a) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19652,20 +21588,34 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - simd_sub(a, vmull_s32(b, c)) +pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0f32")] + fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; + } + let mut ret_val: float32x4x3_t = _vld1q_f32_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19675,20 +21625,30 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - simd_sub(a, vmull_u8(b, c)) +pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0f32")] + fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; + } + _vld1q_f32_x4(a) } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19698,20 +21658,34 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - simd_sub(a, vmull_u16(b, c)) +pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0f32")] + fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; + } + let mut ret_val: float32x4x4_t = _vld1q_f32_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19721,20 +21695,21 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - simd_sub(a, vmull_u32(b, c)) +pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { + transmute(vld1_s64_x2(transmute(a))) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19744,20 +21719,21 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_mul(a, b) +pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { + transmute(vld1_s64_x3(transmute(a))) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -19767,22 +21743,23 @@ pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_mul(a, b) +pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { + transmute(vld1_s64_x4(transmute(a))) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19791,23 +21768,23 @@ pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { + transmute(vld1q_s64_x2(transmute(a))) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19816,23 +21793,26 @@ pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { + let mut ret_val: poly64x2x2_t = transmute(vld1q_s64_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19841,26 +21821,23 @@ pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { + transmute(vld1q_s64_x3(transmute(a))) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19869,26 +21846,27 @@ pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { + let mut ret_val: poly64x2x3_t = transmute(vld1q_s64_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19897,26 +21875,23 @@ pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { + transmute(vld1q_s64_x4(transmute(a))) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19925,39 +21900,263 @@ pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), +pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { + let mut ret_val: poly64x2x4_t = transmute(vld1q_s64_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { + vld1_v8i8(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { + let ret_val: int8x8_t = vld1_v8i8(ptr as *const i8, crate::mem::align_of::() as i32); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { + vld1q_v16i8(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { + let ret_val: int8x16_t = vld1q_v16i8(ptr as *const i8, crate::mem::align_of::() as i32); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { + vld1_v4i16(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { + let ret_val: int16x4_t = vld1_v4i16(ptr as *const i8, crate::mem::align_of::() as i32); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { + vld1q_v8i16(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { + let ret_val: int16x8_t = vld1q_v8i16(ptr as *const i8, crate::mem::align_of::() as i32); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { + vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { + let ret_val: int32x2_t = vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { + vld1q_v4i32(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { + let ret_val: int32x4_t = vld1q_v4i32(ptr as *const i8, crate::mem::align_of::() as i32); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { + vld1_v1i64(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { + vld1q_v2i64(ptr as *const i8, crate::mem::align_of::() as i32) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { + let ret_val: int64x2_t = vld1q_v2i64(ptr as *const i8, crate::mem::align_of::() as i32); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19966,23 +22165,31 @@ pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0i8")] + fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; + } + _vld1_s8_x2(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19991,26 +22198,34 @@ pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0i8")] + fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; + } + let mut ret_val: int8x8x2_t = _vld1_s8_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20019,26 +22234,31 @@ pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0i8")] + fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; + } + _vld1_s8_x3(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20047,39 +22267,35 @@ pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0i8")] + fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; + } + let mut ret_val: int8x8x3_t = _vld1_s8_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20088,23 +22304,31 @@ pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0i8")] + fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; + } + _vld1_s8_x4(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20113,26 +22337,36 @@ pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0i8")] + fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; + } + let mut ret_val: int8x8x4_t = _vld1_s8_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20141,26 +22375,31 @@ pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0i8")] + fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; + } + _vld1q_s8_x2(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20169,39 +22408,42 @@ pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0i8")] + fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; + } + let mut ret_val: int8x16x2_t = _vld1q_s8_x2(a); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20210,23 +22452,31 @@ pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> in target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0i8")] + fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; + } + _vld1q_s8_x3(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20235,26 +22485,47 @@ pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0i8")] + fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; + } + let mut ret_val: int8x16x3_t = _vld1q_s8_x3(a); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20263,26 +22534,31 @@ pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> in target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0i8")] + fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; + } + _vld1q_s8_x4(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20291,39 +22567,52 @@ pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) +pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0i8")] + fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; + } + let mut ret_val: int8x16x4_t = _vld1q_s8_x4(a); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20332,23 +22621,31 @@ pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0i16")] + fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; + } + _vld1_s16_x2(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20357,24 +22654,33 @@ pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0i16")] + fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; + } + let mut ret_val: int16x4x2_t = _vld1_s16_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20384,20 +22690,30 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { - simd_mul(a, vdup_n_f32(b)) +pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0i16")] + fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; + } + _vld1_s16_x3(a) } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20407,20 +22723,34 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { - simd_mul(a, vdupq_n_f32(b)) +pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0i16")] + fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; + } + let mut ret_val: int16x4x3_t = _vld1_s16_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20430,20 +22760,30 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - simd_mul(a, vdup_n_s16(b)) +pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0i16")] + fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; + } + _vld1_s16_x4(a) } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20453,20 +22793,35 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - simd_mul(a, vdupq_n_s16(b)) +pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0i16")] + fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; + } + let mut ret_val: int16x4x4_t = _vld1_s16_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20476,20 +22831,30 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - simd_mul(a, vdup_n_s32(b)) +pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0i16")] + fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; + } + _vld1q_s16_x2(a) } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20499,20 +22864,33 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - simd_mul(a, vdupq_n_s32(b)) +pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0i16")] + fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; + } + let mut ret_val: int16x8x2_t = _vld1q_s16_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20522,20 +22900,30 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { - simd_mul(a, vdup_n_u16(b)) +pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0i16")] + fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; + } + _vld1q_s16_x3(a) } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20545,20 +22933,34 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { - simd_mul(a, vdupq_n_u16(b)) +pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0i16")] + fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; + } + let mut ret_val: int16x8x3_t = _vld1q_s16_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20568,20 +22970,30 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { - simd_mul(a, vdup_n_u32(b)) +pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0i16")] + fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; + } + _vld1q_s16_x4(a) } -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20591,20 +23003,35 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { - simd_mul(a, vdupq_n_u32(b)) +pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0i16")] + fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; + } + let mut ret_val: int16x8x4_t = _vld1q_s16_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Polynomial multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20614,28 +23041,30 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] +pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmul.v8i8" + link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32" )] - fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0i32")] + fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; } - _vmul_p8(a, b) + _vld1_s32_x2(a) } -#[doc = "Polynomial multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20645,28 +23074,33 @@ pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] +pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmul.v16i8" + link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32" )] - fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0i32")] + fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; } - _vmulq_p8(a, b) + let mut ret_val: int32x2x2_t = _vld1_s32_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20676,20 +23110,30 @@ pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_mul(a, b) +pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0i32")] + fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; + } + _vld1_s32_x3(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20699,20 +23143,34 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_mul(a, b) +pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0i32")] + fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; + } + let mut ret_val: int32x2x3_t = _vld1_s32_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20722,20 +23180,30 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_mul(a, b) +pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0i32")] + fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; + } + _vld1_s32_x4(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20745,20 +23213,35 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_mul(a, b) +pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0i32")] + fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; + } + let mut ret_val: int32x2x4_t = _vld1_s32_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20768,20 +23251,30 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_mul(a, b) +pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0i32")] + fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; + } + _vld1q_s32_x2(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20791,20 +23284,33 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_mul(a, b) +pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0i32")] + fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; + } + let mut ret_val: int32x4x2_t = _vld1q_s32_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20814,20 +23320,30 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_mul(a, b) +pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0i32")] + fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; + } + _vld1q_s32_x3(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20837,20 +23353,34 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_mul(a, b) +pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0i32")] + fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; + } + let mut ret_val: int32x4x3_t = _vld1q_s32_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20860,20 +23390,30 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_mul(a, b) +pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0i32")] + fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; + } + _vld1q_s32_x4(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20883,20 +23423,34 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_mul(a, b) +pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0i32")] + fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; + } + let mut ret_val: int32x4x4_t = _vld1q_s32_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20906,20 +23460,29 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_mul(a, b) +pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0i64")] + fn _vld1_s64_x2(a: *const i64) -> int64x1x2_t; + } + _vld1_s64_x2(a) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -20929,22 +23492,30 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_mul(a, b) +pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0i64")] + fn _vld1_s64_x3(a: *const i64) -> int64x1x3_t; + } + _vld1_s64_x3(a) } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20953,26 +23524,31 @@ pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_s16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0i64")] + fn _vld1_s64_x4(a: *const i64) -> int64x1x4_t; + } + _vld1_s64_x4(a) } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20981,26 +23557,31 @@ pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_s16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0i64")] + fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; + } + _vld1q_s64_x2(a) } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21009,23 +23590,34 @@ pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> in target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0i64")] + fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; + } + let mut ret_val: int64x2x2_t = _vld1q_s64_x2(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21034,23 +23626,31 @@ pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0i64")] + fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; + } + _vld1q_s64_x3(a) } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21059,26 +23659,35 @@ pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> in target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_u16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0i64")] + fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; + } + let mut ret_val: int64x2x3_t = _vld1q_s64_x3(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21087,26 +23696,31 @@ pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_u16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) +pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0i64")] + fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; + } + _vld1q_s64_x4(a) } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21115,23 +23729,36 @@ pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0i64")] + fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; + } + let mut ret_val: int64x2x4_t = _vld1q_s64_x4(a); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21140,21 +23767,22 @@ pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { + transmute(vld1_s8_x2(transmute(a))) } -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21164,20 +23792,25 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { - vmull_s16(a, vdup_n_s16(b)) +pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { + let mut ret_val: uint8x8x2_t = transmute(vld1_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21187,20 +23820,22 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { - vmull_s32(a, vdup_n_s32(b)) +pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { + transmute(vld1_s8_x3(transmute(a))) } -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21210,20 +23845,26 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { - vmull_u16(a, vdup_n_u16(b)) +pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { + let mut ret_val: uint8x8x3_t = transmute(vld1_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21233,20 +23874,22 @@ pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { - vmull_u32(a, vdup_n_u32(b)) +pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { + transmute(vld1_s8_x4(transmute(a))) } -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21256,28 +23899,27 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i8")] - fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; - } - _vmull_p8(a, b) +pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld1_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21287,28 +23929,22 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")] - fn _vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - _vmull_s16(a, b) +pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { + transmute(vld1q_s8_x2(transmute(a))) } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21318,28 +23954,33 @@ pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")] - fn _vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - _vmull_s32(a, b) +pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { + let mut ret_val: uint8x16x2_t = transmute(vld1q_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21349,28 +23990,22 @@ pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")] - fn _vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - } - _vmull_s8(a, b) +pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { + transmute(vld1q_s8_x3(transmute(a))) } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21380,28 +24015,38 @@ pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] - fn _vmull_u8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - } - _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { + let mut ret_val: uint8x16x3_t = transmute(vld1q_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21411,28 +24056,22 @@ pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] - fn _vmull_u16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { + transmute(vld1q_s8_x4(transmute(a))) } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21442,28 +24081,43 @@ pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] - fn _vmull_u32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld1q_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21473,20 +24127,22 @@ pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { - simd_neg(a) +pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { + transmute(vld1_s16_x2(transmute(a))) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21496,20 +24152,25 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { - simd_neg(a) +pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { + let mut ret_val: uint16x4x2_t = transmute(vld1_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21519,20 +24180,22 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { - simd_neg(a) +pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { + transmute(vld1_s16_x3(transmute(a))) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21542,20 +24205,26 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { - simd_neg(a) +pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { + let mut ret_val: uint16x4x3_t = transmute(vld1_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21565,20 +24234,22 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { - simd_neg(a) +pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { + transmute(vld1_s16_x4(transmute(a))) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21588,20 +24259,27 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { - simd_neg(a) +pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld1_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21611,20 +24289,22 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { - simd_neg(a) +pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { + transmute(vld1q_s16_x2(transmute(a))) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21634,20 +24314,25 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { - simd_neg(a) +pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { + let mut ret_val: uint16x8x2_t = transmute(vld1q_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21657,20 +24342,22 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_or(a, b) +pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { + transmute(vld1q_s16_x3(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21680,20 +24367,26 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_or(a, b) +pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { + let mut ret_val: uint16x8x3_t = transmute(vld1q_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21703,20 +24396,22 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_or(a, b) +pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { + transmute(vld1q_s16_x4(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21726,20 +24421,27 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_or(a, b) +pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld1q_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21749,20 +24451,22 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_or(a, b) +pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { + transmute(vld1_s32_x2(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21772,20 +24476,25 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_or(a, b) +pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { + let mut ret_val: uint32x2x2_t = transmute(vld1_s32_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21795,20 +24504,22 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_or(a, b) +pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { + transmute(vld1_s32_x3(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21818,20 +24529,26 @@ pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_or(a, b) +pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { + let mut ret_val: uint32x2x3_t = transmute(vld1_s32_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21841,20 +24558,22 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_or(a, b) +pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { + transmute(vld1_s32_x4(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21864,20 +24583,27 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_or(a, b) +pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld1_s32_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21887,20 +24613,22 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_or(a, b) +pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { + transmute(vld1q_s32_x2(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21910,20 +24638,25 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_or(a, b) +pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { + let mut ret_val: uint32x4x2_t = transmute(vld1q_s32_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21933,20 +24666,22 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_or(a, b) +pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { + transmute(vld1q_s32_x3(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21956,20 +24691,26 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_or(a, b) +pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { + let mut ret_val: uint32x4x3_t = transmute(vld1q_s32_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -21979,20 +24720,22 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_or(a, b) +pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { + transmute(vld1q_s32_x4(transmute(a))) } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22002,20 +24745,26 @@ pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_or(a, b) +pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { + let mut ret_val: uint32x4x4_t = transmute(vld1q_s32_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(faddp) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22025,28 +24774,21 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f32" - )] - fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpadd_f32(a, b) +pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { + transmute(vld1_s64_x2(transmute(a))) } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22056,28 +24798,21 @@ pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] - fn _vqabs_s8(a: int8x8_t) -> int8x8_t; - } - _vqabs_s8(a) +pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { + transmute(vld1_s64_x3(transmute(a))) } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22087,28 +24822,22 @@ pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] - fn _vqabsq_s8(a: int8x16_t) -> int8x16_t; - } - _vqabsq_s8(a) +pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { + transmute(vld1_s64_x4(transmute(a))) } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22118,28 +24847,22 @@ pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] - fn _vqabs_s16(a: int16x4_t) -> int16x4_t; - } - _vqabs_s16(a) +pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { + transmute(vld1q_s64_x2(transmute(a))) } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22149,28 +24872,25 @@ pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] - fn _vqabsq_s16(a: int16x8_t) -> int16x8_t; - } - _vqabsq_s16(a) +pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { + let mut ret_val: uint64x2x2_t = transmute(vld1q_s64_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22180,28 +24900,22 @@ pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] - fn _vqabs_s32(a: int32x2_t) -> int32x2_t; - } - _vqabs_s32(a) +pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { + transmute(vld1q_s64_x3(transmute(a))) } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22211,28 +24925,26 @@ pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] - fn _vqabsq_s32(a: int32x4_t) -> int32x4_t; - } - _vqabsq_s32(a) +pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { + let mut ret_val: uint64x2x3_t = transmute(vld1q_s64_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22242,28 +24954,22 @@ pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] - fn _vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqadd_s8(a, b) +pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { + transmute(vld1q_s64_x4(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22273,28 +24979,27 @@ pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] - fn _vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqaddq_s8(a, b) +pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { + let mut ret_val: uint64x2x4_t = transmute(vld1q_s64_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22304,28 +25009,22 @@ pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] - fn _vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqadd_s16(a, b) +pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { + transmute(vld1_s8_x2(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22335,28 +25034,25 @@ pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] - fn _vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqaddq_s16(a, b) +pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { + let mut ret_val: poly8x8x2_t = transmute(vld1_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22366,28 +25062,22 @@ pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] - fn _vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqadd_s32(a, b) +pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { + transmute(vld1_s8_x3(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22397,28 +25087,26 @@ pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] - fn _vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqaddq_s32(a, b) +pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { + let mut ret_val: poly8x8x3_t = transmute(vld1_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22428,28 +25116,22 @@ pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v1i64")] - fn _vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqadd_s64(a, b) +pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { + transmute(vld1_s8_x4(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22459,28 +25141,27 @@ pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] - fn _vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqaddq_s64(a, b) +pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld1_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22490,28 +25171,22 @@ pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] - fn _vqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { + transmute(vld1q_s8_x2(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22521,28 +25196,33 @@ pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] - fn _vqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { + let mut ret_val: poly8x16x2_t = transmute(vld1q_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22552,28 +25232,22 @@ pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] - fn _vqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { + transmute(vld1q_s8_x3(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22583,28 +25257,38 @@ pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] - fn _vqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { + let mut ret_val: poly8x16x3_t = transmute(vld1q_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22614,28 +25298,22 @@ pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] - fn _vqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { + transmute(vld1q_s8_x4(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22645,28 +25323,43 @@ pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] - fn _vqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld1q_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22676,28 +25369,22 @@ pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")] - fn _vqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqadd_u64(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { + transmute(vld1_s16_x2(transmute(a))) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22707,30 +25394,26 @@ pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] - fn _vqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { + let mut ret_val: poly16x4x2_t = transmute(vld1_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22739,27 +25422,23 @@ pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlal_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s32(a, vqdmull_lane_s16::(b, c)) +pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { + transmute(vld1_s16_x3(transmute(a))) } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22768,25 +25447,26 @@ pub unsafe fn vqdmlal_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlal_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqaddq_s64(a, vqdmull_lane_s32::(b, c)) +pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { + let mut ret_val: poly16x4x3_t = transmute(vld1_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22796,20 +25476,22 @@ pub unsafe fn vqdmlal_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vqaddq_s32(a, vqdmull_n_s16(b, c)) +pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { + transmute(vld1_s16_x4(transmute(a))) } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22819,20 +25501,27 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vqaddq_s64(a, vqdmull_n_s32(b, c)) +pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld1_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22842,20 +25531,22 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - vqaddq_s32(a, vqdmull_s16(b, c)) +pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { + transmute(vld1q_s16_x2(transmute(a))) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22865,22 +25556,26 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - vqaddq_s64(a, vqdmull_s32(b, c)) +pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { + let mut ret_val: poly16x8x2_t = transmute(vld1q_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22889,27 +25584,23 @@ pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlsl_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s32(a, vqdmull_lane_s16::(b, c)) +pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { + transmute(vld1q_s16_x3(transmute(a))) } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -22918,25 +25609,26 @@ pub unsafe fn vqdmlsl_lane_s16( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlsl_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqsubq_s64(a, vqdmull_lane_s32::(b, c)) +pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { + let mut ret_val: poly16x8x3_t = transmute(vld1q_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22946,20 +25638,22 @@ pub unsafe fn vqdmlsl_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vqsubq_s32(a, vqdmull_n_s16(b, c)) +pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { + transmute(vld1q_s16_x4(transmute(a))) } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -22969,1039 +25663,1069 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vqsubq_s64(a, vqdmull_n_s32(b, c)) +pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld1q_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v1i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - vqsubq_s32(a, vqdmull_s16(b, c)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v1i64(a: *const i8, b: i32) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] + fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; + } + _vld1_v1i64(a, b) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - vqsubq_s64(a, vqdmull_s32(b, c)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] + fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; + } + _vld1_v2f32(a, b) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] + fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; + } + let ret_val: float32x2_t = _vld1_v2f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] + fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; + } + _vld1_v2i32(a, b) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] + fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; + } + let ret_val: int32x2_t = _vld1_v2i32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v4i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] + fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; + } + _vld1_v4i16(a, b) } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v4i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - let b: int16x4_t = vdup_n_s16(b); - vqdmulh_s16(a, b) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] + fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; + } + let ret_val: int16x4_t = _vld1_v4i16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v8i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - let b: int16x8_t = vdupq_n_s16(b); - vqdmulhq_s16(a, b) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] + fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; + } + _vld1_v8i8(a, b) } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v8i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - let b: int32x2_t = vdup_n_s32(b); - vqdmulh_s32(a, b) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] + fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; + } + let ret_val: int8x8_t = _vld1_v8i8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v16i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - let b: int32x4_t = vdupq_n_s32(b); - vqdmulhq_s32(a, b) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] + fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; + } + _vld1q_v16i8(a, b) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v16i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i16" - )] - fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] + fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; } - _vqdmulh_s16(a, b) + let ret_val: int8x16_t = _vld1q_v16i8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v2i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v8i16" - )] - fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] + fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; } - _vqdmulhq_s16(a, b) + _vld1q_v2i64(a, b) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v2i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v2i32" - )] - fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] + fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; } - _vqdmulh_s32(a, b) + let ret_val: int64x2_t = _vld1q_v2i64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i32" - )] - fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] + fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; } - _vqdmulhq_s32(a, b) + _vld1q_v4f32(a, b) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] + fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; + } + let ret_val: float32x4_t = _vld1q_v4f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] + fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; + } + _vld1q_v4i32(a, b) } -#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { - vqdmull_s16(a, vdup_n_s16(b)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] + fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; + } + let ret_val: int32x4_t = _vld1q_v4i32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v8i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { - vqdmull_s32(a, vdup_n_s32(b)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] + fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; + } + _vld1q_v8i16(a, b) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] + +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v8i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v4i32" - )] - fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] + fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; } - _vqdmull_s16(a, b) + let ret_val: int16x8_t = _vld1q_v8i16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v2i64" - )] - fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0i8")] + fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } - _vqdmull_s32(a, b) + _vld2_dup_f32(a as *const i8, 4) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v8i8" - )] - fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0i8")] + fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } - _vqmovn_s16(a) + let mut ret_val: float32x2x2_t = _vld2_dup_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v4i16" - )] - fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0i8")] + fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } - _vqmovn_s32(a) + _vld2q_dup_f32(a as *const i8, 4) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v2i32" - )] - fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0i8")] + fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } - _vqmovn_s64(a) + let mut ret_val: float32x4x2_t = _vld2q_dup_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v8i8" - )] - fn _vqmovn_u16(a: int16x8_t) -> int8x8_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0i8")] + fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } - _vqmovn_u16(a.as_signed()).as_unsigned() + _vld2_dup_s8(a as *const i8, 1) } -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v4i16" - )] - fn _vqmovn_u32(a: int32x4_t) -> int16x4_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0i8")] + fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } - _vqmovn_u32(a.as_signed()).as_unsigned() + let mut ret_val: int8x8x2_t = _vld2_dup_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0i8")] + fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; + } + _vld2q_dup_s8(a as *const i8, 1) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0i8")] + fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; + } + let mut ret_val: int8x16x2_t = _vld2q_dup_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0i8")] + fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; + } + _vld2_dup_s16(a as *const i8, 2) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0i8")] + fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; + } + let mut ret_val: int16x4x2_t = _vld2_dup_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0i8")] + fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; + } + _vld2q_dup_s16(a as *const i8, 2) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0i8")] + fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; + } + let mut ret_val: int16x8x2_t = _vld2q_dup_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0i8")] + fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; + } + _vld2_dup_s32(a as *const i8, 4) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0i8")] + fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; + } + let mut ret_val: int32x2x2_t = _vld2_dup_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0i8")] + fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; + } + _vld2q_dup_s32(a as *const i8, 4) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0i8")] + fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; + } + let mut ret_val: int32x4x2_t = _vld2q_dup_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v2i32" + link_name = "llvm.aarch64.neon.ld2r.v2f32.p0f32" )] - fn _vqmovn_u64(a: int64x2_t) -> int32x2_t; + fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; } - _vqmovn_u64(a.as_signed()).as_unsigned() + _vld2_dup_f32(a as _) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v8i8" + link_name = "llvm.aarch64.neon.ld2r.v2f32.p0f32" )] - fn _vqmovun_s16(a: int16x8_t) -> int8x8_t; + fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; } - _vqmovun_s16(a).as_unsigned() + let mut ret_val: float32x2x2_t = _vld2_dup_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v4i16" + link_name = "llvm.aarch64.neon.ld2r.v4f32.p0f32" )] - fn _vqmovun_s32(a: int32x4_t) -> int16x4_t; + fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; } - _vqmovun_s32(a).as_unsigned() + _vld2q_dup_f32(a as _) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v2i32" + link_name = "llvm.aarch64.neon.ld2r.v4f32.p0f32" )] - fn _vqmovun_s64(a: int64x2_t) -> int32x2_t; + fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; } - _vqmovun_s64(a).as_unsigned() + let mut ret_val: float32x4x2_t = _vld2q_dup_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i8" + link_name = "llvm.aarch64.neon.ld2r.v8i8.p0i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] - fn _vqneg_s8(a: int8x8_t) -> int8x8_t; + fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; } - _vqneg_s8(a) + _vld2_dup_s8(a as _) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v16i8" + link_name = "llvm.aarch64.neon.ld2r.v8i8.p0i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] - fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; + fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; } - _vqnegq_s8(a) + let mut ret_val: int8x8x2_t = _vld2_dup_s8(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i16" + link_name = "llvm.aarch64.neon.ld2r.v16i8.p0i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] - fn _vqneg_s16(a: int16x4_t) -> int16x4_t; + fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; } - _vqneg_s16(a) + _vld2q_dup_s8(a as _) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i16" + link_name = "llvm.aarch64.neon.ld2r.v16i8.p0i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] - fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; + fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; } - _vqnegq_s16(a) + let mut ret_val: int8x16x2_t = _vld2q_dup_s8(a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v2i32" + link_name = "llvm.aarch64.neon.ld2r.v4i16.p0i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] - fn _vqneg_s32(a: int32x2_t) -> int32x2_t; + fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; } - _vqneg_s32(a) + _vld2_dup_s16(a as _) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i32" + link_name = "llvm.aarch64.neon.ld2r.v4i16.p0i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] - fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; + fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; } - _vqnegq_s32(a) + let mut ret_val: int16x4x2_t = _vld2_dup_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulh_s16(a, b) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v8i16.p0i16" + )] + fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; + } + _vld2q_dup_s16(a as _) } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v8i16.p0i16" + )] + fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; + } + let mut ret_val: int16x8x2_t = _vld2q_dup_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2i32.p0i32" + )] + fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; + } + _vld2_dup_s32(a as _) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2i32.p0i32" + )] + fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; + } + let mut ret_val: int32x2x2_t = _vld2_dup_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v4i32.p0i32" + )] + fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; + } + _vld2q_dup_s32(a as _) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v4i32.p0i32" + )] + fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; + } + let mut ret_val: int32x4x2_t = _vld2q_dup_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24010,24 +26734,59 @@ pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); - vqrdmulh_s32(a, b) +pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { + transmute(vld2_dup_s64(transmute(a))) } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0i8")] + fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; + } + _vld2_dup_s64(a as *const i8, 8) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v1i64.p0i64" + )] + fn _vld2_dup_s64(ptr: *const i64) -> int64x1x2_t; + } + _vld2_dup_s64(a as _) +} + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24036,24 +26795,23 @@ pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulh_s16(a, b) +pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { + transmute(vld2_dup_s64(transmute(a))) } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24062,24 +26820,23 @@ pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); - vqrdmulh_s32(a, b) +pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { + transmute(vld2_dup_s8(transmute(a))) } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24088,37 +26845,26 @@ pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let b: int16x8_t = simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmulhq_s16(a, b) +pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { + let mut ret_val: uint8x8x2_t = transmute(vld2_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24127,24 +26873,23 @@ pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulhq_s32(a, b) +pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { + transmute(vld2q_dup_s8(transmute(a))) } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24153,37 +26898,34 @@ pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: int16x8_t = simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] +pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { + let mut ret_val: uint8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ); - vqrdmulhq_s16(a, b) + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24192,22 +26934,22 @@ pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulhq_s32(a, b) +pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { + transmute(vld2_dup_s16(transmute(a))) } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24217,20 +26959,25 @@ pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - vqrdmulh_s16(a, vdup_n_s16(b)) +pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { + let mut ret_val: uint16x4x2_t = transmute(vld2_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24240,20 +26987,22 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - vqrdmulhq_s16(a, vdupq_n_s16(b)) +pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { + transmute(vld2q_dup_s16(transmute(a))) } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24263,20 +27012,25 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - vqrdmulh_s32(a, vdup_n_s32(b)) +pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { + let mut ret_val: uint16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24286,20 +27040,22 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - vqrdmulhq_s32(a, vdupq_n_s32(b)) +pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { + transmute(vld2_dup_s32(transmute(a))) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24309,28 +27065,25 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i16" - )] - fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqrdmulh_s16(a, b) +pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { + let mut ret_val: uint32x2x2_t = transmute(vld2_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24340,28 +27093,22 @@ pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v8i16" - )] - fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqrdmulhq_s16(a, b) +pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { + transmute(vld2q_dup_s32(transmute(a))) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24371,28 +27118,25 @@ pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v2i32" - )] - fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqrdmulh_s32(a, b) +pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { + let mut ret_val: uint32x4x2_t = transmute(vld2q_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24402,28 +27146,22 @@ pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i32" - )] - fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqrdmulhq_s32(a, b) +pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { + transmute(vld2_dup_s8(transmute(a))) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24433,28 +27171,25 @@ pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i8" - )] - fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqrshl_s8(a, b) +pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { + let mut ret_val: poly8x8x2_t = transmute(vld2_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24464,28 +27199,22 @@ pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v16i8" - )] - fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqrshlq_s8(a, b) +pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { + transmute(vld2q_dup_s8(transmute(a))) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24495,28 +27224,33 @@ pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i16" - )] - fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqrshl_s16(a, b) +pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { + let mut ret_val: poly8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24526,28 +27260,22 @@ pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i16" - )] - fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqrshlq_s16(a, b) +pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { + transmute(vld2_dup_s16(transmute(a))) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24557,28 +27285,25 @@ pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i32" - )] - fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqrshl_s32(a, b) +pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { + let mut ret_val: poly16x4x2_t = transmute(vld2_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24588,28 +27313,22 @@ pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i32" - )] - fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqrshlq_s32(a, b) +pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { + transmute(vld2q_dup_s16(transmute(a))) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"] + +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24619,1318 +27338,1477 @@ pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v1i64" - )] - fn _vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; +pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { + let mut ret_val: poly16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32.p0i8")] + fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } - _vqrshl_s64(a, b) + _vld2_f32(a as *const i8, 4) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i64" - )] - fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32.p0i8")] + fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } - _vqrshlq_s64(a, b) + let mut ret_val: float32x2x2_t = _vld2_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i8" - )] - fn _vqrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32.p0i8")] + fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } - _vqrshl_u8(a.as_signed(), b).as_unsigned() + _vld2q_f32(a as *const i8, 4) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v16i8" - )] - fn _vqrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32.p0i8")] + fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } - _vqrshlq_u8(a.as_signed(), b).as_unsigned() + let mut ret_val: float32x4x2_t = _vld2q_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i16" - )] - fn _vqrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8.p0i8")] + fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } - _vqrshl_u16(a.as_signed(), b).as_unsigned() + _vld2_s8(a as *const i8, 1) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i16" - )] - fn _vqrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8.p0i8")] + fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } - _vqrshlq_u16(a.as_signed(), b).as_unsigned() + let mut ret_val: int8x8x2_t = _vld2_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i32" - )] - fn _vqrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8.p0i8")] + fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } - _vqrshl_u32(a.as_signed(), b).as_unsigned() + _vld2q_s8(a as *const i8, 1) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i32" - )] - fn _vqrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8.p0i8")] + fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } - _vqrshlq_u32(a.as_signed(), b).as_unsigned() + let mut ret_val: int8x16x2_t = _vld2q_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v1i64" - )] - fn _vqrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16.p0i8")] + fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } - _vqrshl_u64(a.as_signed(), b).as_unsigned() + _vld2_s16(a as *const i8, 2) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i64" - )] - fn _vqrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16.p0i8")] + fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } - _vqrshlq_u64(a.as_signed(), b).as_unsigned() + let mut ret_val: int16x4x2_t = _vld2_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16.p0i8")] + fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; + } + _vld2q_s16(a as *const i8, 2) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] - fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16.p0i8")] + fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } - _vqrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) + let mut ret_val: int16x8x2_t = _vld2q_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32.p0i8")] + fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; + } + _vld2_s32(a as *const i8, 4) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] - fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32.p0i8")] + fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } - _vqrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) + let mut ret_val: int32x2x2_t = _vld2_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32.p0i8")] + fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; + } + _vld2q_s32(a as *const i8, 4) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] - fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32.p0i8")] + fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } - _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) + let mut ret_val: int32x4x2_t = _vld2q_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v8i8" + link_name = "llvm.aarch64.neon.ld2.v2f32.p0v2f32" )] - fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; } - _vqrshrn_n_s16(a, N) + _vld2_f32(a as _) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v4i16" + link_name = "llvm.aarch64.neon.ld2.v2f32.p0v2f32" )] - fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; } - _vqrshrn_n_s32(a, N) + let mut ret_val: float32x2x2_t = _vld2_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v2i32" + link_name = "llvm.aarch64.neon.ld2.v4f32.p0v4f32" )] - fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; } - _vqrshrn_n_s64(a, N) + _vld2q_f32(a as _) } -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] - fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v4f32.p0v4f32" + )] + fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; } - _vqrshrn_n_u16( - a.as_signed(), - const { - uint16x8_t([ - -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, - -N as u16, - ]) - } - .as_signed(), - ) - .as_unsigned() + let mut ret_val: float32x4x2_t = _vld2q_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] - fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v8i8.p0v8i8" + )] + fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; } - _vqrshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), - ) - .as_unsigned() + _vld2_s8(a as _) } -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] - fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v8i8.p0v8i8" + )] + fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; } - _vqrshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned() + let mut ret_val: int8x8x2_t = _vld2_s8(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v8i8" + link_name = "llvm.aarch64.neon.ld2.v16i8.p0v16i8" )] - fn _vqrshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; } - _vqrshrn_n_u16(a.as_signed(), N).as_unsigned() + _vld2q_s8(a as _) } -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v4i16" + link_name = "llvm.aarch64.neon.ld2.v16i8.p0v16i8" )] - fn _vqrshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; } - _vqrshrn_n_u32(a.as_signed(), N).as_unsigned() + let mut ret_val: int8x16x2_t = _vld2q_s8(a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v2i32" + link_name = "llvm.aarch64.neon.ld2.v4i16.p0v4i16" )] - fn _vqrshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; } - _vqrshrn_n_u64(a.as_signed(), N).as_unsigned() + _vld2_s16(a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] - fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v4i16.p0v4i16" + )] + fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; } - _vqrshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - .as_unsigned() + let mut ret_val: int16x4x2_t = _vld2_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] - fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v8i16.p0v8i16" + )] + fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; } - _vqrshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - .as_unsigned() + _vld2q_s16(a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] - fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v8i16.p0v8i16" + )] + fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; } - _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() + let mut ret_val: int16x8x2_t = _vld2q_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v8i8" + link_name = "llvm.aarch64.neon.ld2.v2i32.p0v2i32" )] - fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; } - _vqrshrun_n_s16(a, N).as_unsigned() + _vld2_s32(a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v4i16" + link_name = "llvm.aarch64.neon.ld2.v2i32.p0v2i32" )] - fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; } - _vqrshrun_n_s32(a, N).as_unsigned() + let mut ret_val: int32x2x2_t = _vld2_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v2i32" + link_name = "llvm.aarch64.neon.ld2.v4i32.p0v4i32" )] - fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; } - _vqrshrun_n_s64(a, N).as_unsigned() + _vld2q_s32(a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - vqshl_s8(a, vdup_n_s8(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v4i32.p0v4i32" + )] + fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; + } + let mut ret_val: int32x4x2_t = _vld2q_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - vqshlq_s8(a, vdupq_n_s8(N as _)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8" + )] + fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; + } + _vld2_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - vqshl_s16(a, vdup_n_s16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8" + )] + fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; + } + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: float32x2x2_t = _vld2_lane_f32(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - vqshlq_s16(a, vdupq_n_s16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8" + )] + fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) + -> float32x4x2_t; + } + _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 5); - vqshl_s32(a, vdup_n_s32(N as _)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8" + )] + fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) + -> float32x4x2_t; + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: float32x4x2_t = _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 5); - vqshlq_s32(a, vdupq_n_s32(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { - static_assert_uimm_bits!(N, 6); - vqshl_s64(a, vdup_n_s64(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 6); - vqshlq_s64(a, vdupq_n_s64(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - vqshl_u8(a, vdup_n_s8(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - vqshlq_u8(a, vdupq_n_s8(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - vqshl_u16(a, vdup_n_s16(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - vqshlq_u16(a, vdupq_n_s16(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - vqshl_u32(a, vdup_n_s32(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - vqshlq_u32(a, vdupq_n_s32(N as _)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8" + )] + fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; + } + _vld2_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - vqshl_u64(a, vdup_n_s64(N as _)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8" + )] + fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int8x8x2_t = _vld2_lane_s8(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - vqshlq_u64(a, vdupq_n_s64(N as _)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8" + )] + fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; + } + _vld2_lane_s16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v8i8" + link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8" )] - fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; } - _vqshl_s8(a, b) + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: int16x4x2_t = _vld2_lane_s16(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v16i8" + link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8" )] - fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; } - _vqshlq_s8(a, b) + _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v4i16" + link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8" )] - fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; } - _vqshl_s16(a, b) + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int16x8x2_t = _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v8i16" + link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8" )] - fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; } - _vqshlq_s16(a, b) + _vld2_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v2i32" + link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8" )] - fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; } - _vqshl_s32(a, b) + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: int32x2x2_t = _vld2_lane_s32(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v4i32" + link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8" )] - fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; } - _vqshlq_s32(a, b) + _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v1i64" + link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8" )] - fn _vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; } - _vqshl_s64(a, b) + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: int32x4x2_t = _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")] + fn _vld2_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x2_t; + } + _vld2_lane_f32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")] + fn _vld2_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x2_t; + } + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: float32x2x2_t = _vld2_lane_f32(a as _, b.0, b.1, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")] + fn _vld2q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x2_t; + } + _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")] + fn _vld2q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x2_t; + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: float32x4x2_t = _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")] + fn _vld2q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x2_t; + } + _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")] + fn _vld2q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x2_t; + } + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int16x8x2_t = _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")] + fn _vld2q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x2_t; + } + _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")] + fn _vld2q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x2_t; + } + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: int32x4x2_t = _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")] + fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) + -> int8x8x2_t; + } + _vld2_lane_s8(a as _, b.0, b.1, LANE, 1) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")] + fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) + -> int8x8x2_t; + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int8x8x2_t = _vld2_lane_s8(a as _, b.0, b.1, LANE, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")] + fn _vld2_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x2_t; + } + _vld2_lane_s16(a as _, b.0, b.1, LANE, 2) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")] + fn _vld2_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x2_t; + } + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: int16x4x2_t = _vld2_lane_s16(a as _, b.0, b.1, LANE, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")] + fn _vld2_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x2_t; + } + _vld2_lane_s32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")] + fn _vld2_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x2_t; + } + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: int32x2x2_t = _vld2_lane_s32(a as _, b.0, b.1, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25939,29 +28817,25 @@ pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v2i64" - )] - fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqshlq_s64(a, b) +pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25970,29 +28844,31 @@ pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v8i8" - )] - fn _vqshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqshl_u8(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: uint8x8x2_t = transmute(vld2_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26001,29 +28877,25 @@ pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v16i8" - )] - fn _vqshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqshlq_u8(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26032,29 +28904,31 @@ pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v4i16" - )] - fn _vqshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqshl_u16(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: uint16x4x2_t = transmute(vld2_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26063,29 +28937,25 @@ pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v8i16" - )] - fn _vqshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqshlq_u16(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26094,29 +28964,31 @@ pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v2i32" - )] - fn _vqshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqshl_u32(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: uint16x8x2_t = transmute(vld2q_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26125,29 +28997,25 @@ pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v4i32" - )] - fn _vqshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqshlq_u32(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld2_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26156,29 +29024,31 @@ pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v1i64" - )] - fn _vqshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqshl_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + let mut ret_val: uint32x2x2_t = transmute(vld2_lane_s32::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26187,799 +29057,399 @@ pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v2i64" - )] - fn _vqshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqshlq_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; - } - _vqshlu_n_s8( - a, - const { - int8x8_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: uint32x4x2_t = transmute(vld2q_lane_s32::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; - } - _vqshluq_n_s8( - a, - const { - int8x16_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; - } - _vqshlu_n_s16( - a, - const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, - ) - .as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: poly8x8x2_t = transmute(vld2_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; - } - _vqshluq_n_s16( - a, - const { - int16x8_t([ - N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, - ]) - }, - ) - .as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; - } - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: poly16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + let mut ret_val: poly16x4x2_t = transmute(vld2_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; - } - _vqshluq_n_s32( - a, - const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, - ) - .as_unsigned() +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; - } - _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: poly16x8x2_t = transmute(vld2q_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { + transmute(vld2_s64(transmute(a))) +} + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64.p0i8")] + fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() + _vld2_s64(a as *const i8, 8) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v8i8" + link_name = "llvm.aarch64.neon.ld2.v1i64.p0v1i64" )] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + fn _vld2_s64(ptr: *const int64x1_t) -> int64x1x2_t; } - _vqshlu_n_s8( - a, - const { - int8x8_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() + _vld2_s64(a as _) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v16i8" - )] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; - } - _vqshluq_n_s8( - a, - const { - int8x16_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { + transmute(vld2_s64(transmute(a))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v4i16" - )] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; - } - _vqshlu_n_s16( - a, - const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, - ) - .as_unsigned() +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { + transmute(vld2_s8(transmute(a))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v8i16" - )] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; - } - _vqshluq_n_s16( - a, - const { - int16x8_t([ - N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, - ]) - }, - ) - .as_unsigned() +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { + let mut ret_val: uint8x8x2_t = transmute(vld2_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i32" - )] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; - } - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { + transmute(vld2q_s8(transmute(a))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v4i32" - )] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; - } - _vqshluq_n_s32( - a, - const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v1i64" - )] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; - } - _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i64" - )] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; - } - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] - fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] - fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] - fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v8i8" - )] - fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqshrn_n_s16(a, N) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v4i16" - )] - fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqshrn_n_s32(a, N) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v2i32" - )] - fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqshrn_n_s64(a, N) -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] - fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqshrn_n_u16( - a.as_signed(), - const { - uint16x8_t([ - -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, - -N as u16, - ]) - } - .as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] - fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] - fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v8i8" - )] - fn _vqshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqshrn_n_u16(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v4i16" - )] - fn _vqshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqshrn_n_u32(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v2i32" - )] - fn _vqshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqshrn_n_u64(a.as_signed(), N).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] - fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] - fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] - fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v8i8" - )] - fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqshrun_n_s16(a, N).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v4i16" - )] - fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqshrun_n_s32(a, N).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v2i32" - )] - fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqshrun_n_s64(a, N).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -26989,28 +29459,33 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i8")] - fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqsub_s8(a, b) +pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { + let mut ret_val: uint8x16x2_t = transmute(vld2q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27020,28 +29495,22 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.16i8")] - fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqsubq_s8(a, b) +pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { + transmute(vld2_s16(transmute(a))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27051,28 +29520,25 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i16")] - fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqsub_s16(a, b) +pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { + let mut ret_val: uint16x4x2_t = transmute(vld2_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27082,28 +29548,22 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i16")] - fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqsubq_s16(a, b) +pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { + transmute(vld2q_s16(transmute(a))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27113,28 +29573,25 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i32")] - fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqsub_s32(a, b) +pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { + let mut ret_val: uint16x8x2_t = transmute(vld2q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27144,28 +29601,22 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i32")] - fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqsubq_s32(a, b) +pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { + transmute(vld2_s32(transmute(a))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27175,28 +29626,25 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.1i64")] - fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqsub_s64(a, b) +pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { + let mut ret_val: uint32x2x2_t = transmute(vld2_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27206,28 +29654,22 @@ pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i64")] - fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqsubq_s64(a, b) +pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { + transmute(vld2q_s32(transmute(a))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27237,28 +29679,25 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i8")] - fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { + let mut ret_val: uint32x4x2_t = transmute(vld2q_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27268,28 +29707,22 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.16i8")] - fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { + transmute(vld2_s8(transmute(a))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27299,28 +29732,25 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i16")] - fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { + let mut ret_val: poly8x8x2_t = transmute(vld2_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27330,28 +29760,22 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i16")] - fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { + transmute(vld2q_s8(transmute(a))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27361,28 +29785,33 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i32")] - fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { + let mut ret_val: poly8x16x2_t = transmute(vld2q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27392,28 +29821,22 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i32")] - fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { + transmute(vld2_s16(transmute(a))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27423,28 +29846,25 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.1i64")] - fn _vqsub_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqsub_u64(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { + let mut ret_val: poly16x4x2_t = transmute(vld2_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27454,28 +29874,22 @@ pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i64")] - fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { + transmute(vld2q_s16(transmute(a))) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] + +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) + assert_instr(ld2) )] #[cfg_attr( not(target_arch = "arm"), @@ -27485,252 +29899,756 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] +pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { + let mut ret_val: poly16x8x2_t = transmute(vld2q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f32" + link_name = "llvm.aarch64.neon.ld3r.v2f32.p0f32" )] - fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; + fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; } - _vrecpe_f32(a) + _vld3_dup_f32(a as _) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v4f32" + link_name = "llvm.aarch64.neon.ld3r.v2f32.p0f32" )] - fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; + fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; } - _vrecpeq_f32(a) + let mut ret_val: float32x2x3_t = _vld3_dup_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v2i32" + link_name = "llvm.aarch64.neon.ld3r.v4f32.p0f32" )] - fn _vrecpe_u32(a: int32x2_t) -> int32x2_t; + fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; } - _vrecpe_u32(a.as_signed()).as_unsigned() + _vld3q_dup_f32(a as _) } -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v4i32" + link_name = "llvm.aarch64.neon.ld3r.v4f32.p0f32" )] - fn _vrecpeq_u32(a: int32x4_t) -> int32x4_t; + fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; } - _vrecpeq_u32(a.as_signed()).as_unsigned() + let mut ret_val: float32x4x3_t = _vld3q_dup_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v2f32" + link_name = "llvm.aarch64.neon.ld3r.v8i8.p0i8" )] - fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; } - _vrecps_f32(a, b) + _vld3_dup_s8(a as _) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v4f32" + link_name = "llvm.aarch64.neon.ld3r.v8i8.p0i8" )] - fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; } - _vrecpsq_f32(a, b) + let mut ret_val: int8x8x3_t = _vld3_dup_s8(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v16i8.p0i8" + )] + fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; + } + _vld3q_dup_s8(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v16i8.p0i8" + )] + fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; + } + let mut ret_val: int8x16x3_t = _vld3q_dup_s8(a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4i16.p0i16" + )] + fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; + } + _vld3_dup_s16(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4i16.p0i16" + )] + fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; + } + let mut ret_val: int16x4x3_t = _vld3_dup_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v8i16.p0i16" + )] + fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; + } + _vld3q_dup_s16(a as _) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v8i16.p0i16" + )] + fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; + } + let mut ret_val: int16x8x3_t = _vld3q_dup_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2i32.p0i32" + )] + fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; + } + _vld3_dup_s32(a as _) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2i32.p0i32" + )] + fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; + } + let mut ret_val: int32x2x3_t = _vld3_dup_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4i32.p0i32" + )] + fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; + } + _vld3q_dup_s32(a as _) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4i32.p0i32" + )] + fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; + } + let mut ret_val: int32x4x3_t = _vld3q_dup_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v1i64.p0i64" + )] + fn _vld3_dup_s64(ptr: *const i64) -> int64x1x3_t; + } + _vld3_dup_s64(a as _) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0i8")] + fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + _vld3_dup_f32(a as *const i8, 4) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0i8")] + fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + let mut ret_val: float32x2x3_t = _vld3_dup_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0i8")] + fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + } + _vld3q_dup_f32(a as *const i8, 4) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0i8")] + fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + } + let mut ret_val: float32x4x3_t = _vld3q_dup_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0i8")] + fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + _vld3_dup_s8(a as *const i8, 1) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0i8")] + fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + let mut ret_val: int8x8x3_t = _vld3_dup_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0i8")] + fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + _vld3q_dup_s8(a as *const i8, 1) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0i8")] + fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + let mut ret_val: int8x16x3_t = _vld3q_dup_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0i8")] + fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + _vld3_dup_s16(a as *const i8, 2) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0i8")] + fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + let mut ret_val: int16x4x3_t = _vld3_dup_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0i8")] + fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + _vld3q_dup_s16(a as *const i8, 2) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0i8")] + fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + let mut ret_val: int16x8x3_t = _vld3q_dup_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0i8")] + fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + _vld3_dup_s32(a as *const i8, 4) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0i8")] + fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + let mut ret_val: int32x2x3_t = _vld3_dup_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0i8")] + fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + } + _vld3q_dup_s32(a as *const i8, 4) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0i8")] + fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + } + let mut ret_val: int32x4x3_t = _vld3q_dup_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27740,11 +30658,29 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - transmute(a) +pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { + transmute(vld3_dup_s64(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0i8")] + fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; + } + _vld3_dup_s64(a as *const i8, 8) +} + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -27753,7 +30689,7 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27763,20 +30699,22 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - transmute(a) +pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { + transmute(vld3_dup_s64(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27786,20 +30724,22 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { + transmute(vld3_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27809,20 +30749,26 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { + let mut ret_val: uint8x8x3_t = transmute(vld3_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27832,20 +30778,22 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27855,20 +30803,38 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { + let mut ret_val: uint8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27878,20 +30844,22 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27901,20 +30869,26 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { + let mut ret_val: uint16x4x3_t = transmute(vld3_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27924,20 +30898,22 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { - transmute(a) +pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27947,20 +30923,26 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { - transmute(a) +pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { + let mut ret_val: uint16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27970,20 +30952,22 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { - transmute(a) +pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { + transmute(vld3_dup_s32(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -27993,20 +30977,26 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - transmute(a) +pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { + let mut ret_val: uint32x2x3_t = transmute(vld3_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28016,20 +31006,22 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { - transmute(a) +pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_dup_s32(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28039,20 +31031,26 @@ pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { + let mut ret_val: uint32x4x3_t = transmute(vld3q_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28062,20 +31060,22 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28085,20 +31085,26 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { + let mut ret_val: poly8x8x3_t = transmute(vld3_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28108,20 +31114,22 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28131,20 +31139,38 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { + let mut ret_val: poly8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28154,20 +31180,22 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28177,20 +31205,26 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { - transmute(a) +pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { + let mut ret_val: poly16x4x3_t = transmute(vld3_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28200,20 +31234,22 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { - transmute(a) +pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] + +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -28223,964 +31259,1574 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { - transmute(a) +pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { + let mut ret_val: poly16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2f32.p0v2f32" + )] + fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; + } + _vld3_f32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2f32.p0v2f32" + )] + fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; + } + let mut ret_val: float32x2x3_t = _vld3_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4f32.p0v4f32" + )] + fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; + } + _vld3q_f32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4f32.p0v4f32" + )] + fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; + } + let mut ret_val: float32x4x3_t = _vld3q_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v8i8.p0v8i8" + )] + fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; + } + _vld3_s8(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v8i8.p0v8i8" + )] + fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; + } + let mut ret_val: int8x8x3_t = _vld3_s8(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v16i8.p0v16i8" + )] + fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; + } + _vld3q_s8(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v16i8.p0v16i8" + )] + fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; + } + let mut ret_val: int8x16x3_t = _vld3q_s8(a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4i16.p0v4i16" + )] + fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; + } + _vld3_s16(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4i16.p0v4i16" + )] + fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; + } + let mut ret_val: int16x4x3_t = _vld3_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v8i16.p0v8i16" + )] + fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; + } + _vld3q_s16(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v8i16.p0v8i16" + )] + fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; + } + let mut ret_val: int16x8x3_t = _vld3q_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2i32.p0v2i32" + )] + fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; + } + _vld3_s32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2i32.p0v2i32" + )] + fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; + } + let mut ret_val: int32x2x3_t = _vld3_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4i32.p0v4i32" + )] + fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; + } + _vld3q_s32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4i32.p0v4i32" + )] + fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; + } + let mut ret_val: int32x4x3_t = _vld3q_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0i8")] + fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + _vld3_f32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0i8")] + fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + let mut ret_val: float32x2x3_t = _vld3_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0i8")] + fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + } + _vld3q_f32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0i8")] + fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + } + let mut ret_val: float32x4x3_t = _vld3q_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0i8")] + fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + _vld3_s8(a as *const i8, 1) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0i8")] + fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + let mut ret_val: int8x8x3_t = _vld3_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0i8")] + fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + _vld3q_s8(a as *const i8, 1) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0i8")] + fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + let mut ret_val: int8x16x3_t = _vld3q_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0i8")] + fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + _vld3_s16(a as *const i8, 2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0i8")] + fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + let mut ret_val: int16x4x3_t = _vld3_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0i8")] + fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + _vld3q_s16(a as *const i8, 2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0i8")] + fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + let mut ret_val: int16x8x3_t = _vld3q_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0i8")] + fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + _vld3_s32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0i8")] + fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + let mut ret_val: int32x2x3_t = _vld3_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0i8")] + fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + } + _vld3q_s32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0i8")] + fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + } + let mut ret_val: int32x4x3_t = _vld3q_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8" + )] + fn _vld3_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x3_t; + } + _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8" + )] + fn _vld3_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x3_t; + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: float32x2x3_t = _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8" + )] + fn _vld3q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x3_t; + } + _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8" + )] + fn _vld3q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x3_t; + } + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: float32x4x3_t = _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")] + fn _vld3_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x3_t; + } + _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")] + fn _vld3_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x3_t; + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: float32x2x3_t = _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8" + )] + fn _vld3_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x3_t; + } + _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8" + )] + fn _vld3_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x3_t; + } + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int8x8x3_t = _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8" + )] + fn _vld3_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x3_t; + } + _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8" + )] + fn _vld3_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x3_t; + } + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: int16x4x3_t = _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8" + )] + fn _vld3q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x3_t; + } + _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8" + )] + fn _vld3q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x3_t; + } + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int16x8x3_t = _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8" + )] + fn _vld3_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x3_t; + } + _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8" + )] + fn _vld3_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x3_t; + } + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: int32x2x3_t = _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8" + )] + fn _vld3q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x3_t; + } + _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8" + )] + fn _vld3q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x3_t; + } + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: int32x4x3_t = _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")] + fn _vld3_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x3_t; + } + _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")] + fn _vld3_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x3_t; + } + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int8x8x3_t = _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")] + fn _vld3_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x3_t; + } + _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")] + fn _vld3_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x3_t; + } + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: int16x4x3_t = _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")] + fn _vld3q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x3_t; + } + _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")] + fn _vld3q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x3_t; + } + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int16x8x3_t = _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")] + fn _vld3_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x3_t; + } + _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")] + fn _vld3_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x3_t; + } + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: int32x2x3_t = _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")] + fn _vld3q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x3_t; + } + _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +} + +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")] + fn _vld3q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x3_t; + } + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: int32x4x3_t = _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29189,21 +32835,25 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29212,21 +32862,33 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: uint8x8x3_t = transmute(vld3_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29235,21 +32897,25 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29258,21 +32924,33 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: uint16x4x3_t = transmute(vld3_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29281,21 +32959,25 @@ pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29304,21 +32986,33 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: uint16x8x3_t = transmute(vld3q_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29327,21 +33021,25 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - transmute(a) +pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29350,21 +33048,33 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - transmute(a) +pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + let mut ret_val: uint32x2x3_t = transmute(vld3_lane_s32::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29373,21 +33083,25 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { - transmute(a) +pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29396,21 +33110,33 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - transmute(a) +pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: uint32x4x3_t = transmute(vld3q_lane_s32::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29419,21 +33145,25 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29442,21 +33172,33 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: poly8x8x3_t = transmute(vld3_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29465,21 +33207,25 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29488,21 +33234,33 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: poly16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: poly16x4x3_t = transmute(vld3_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29511,21 +33269,25 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29534,16 +33296,26 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: poly16x8x3_t = transmute(vld3q_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -29557,57 +33329,49 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { - transmute(a) +pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { + transmute(vld3_s64(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { - transmute(a) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v1i64.p0v1i64" + )] + fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; + } + _vld3_s64(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0i8")] + fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; + } + _vld3_s64(a as *const i8, 8) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -29626,20 +33390,22 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { - transmute(a) +pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { + transmute(vld3_s64(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29649,20 +33415,22 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { + transmute(vld3_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29672,20 +33440,26 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { + let mut ret_val: uint8x8x3_t = transmute(vld3_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29695,20 +33469,22 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29718,20 +33494,38 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { + let mut ret_val: uint8x16x3_t = transmute(vld3q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29741,20 +33535,22 @@ pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29764,20 +33560,26 @@ pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + let mut ret_val: uint16x4x3_t = transmute(vld3_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29787,20 +33589,22 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - transmute(a) +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29810,20 +33614,26 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - transmute(a) +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + let mut ret_val: uint16x8x3_t = transmute(vld3q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29833,20 +33643,22 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - transmute(a) +pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { + transmute(vld3_s32(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29856,20 +33668,26 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - transmute(a) +pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { + let mut ret_val: uint32x2x3_t = transmute(vld3_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29879,20 +33697,22 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_s32(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29902,20 +33722,26 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + let mut ret_val: uint32x4x3_t = transmute(vld3q_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29925,20 +33751,22 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29948,20 +33776,26 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + let mut ret_val: poly8x8x3_t = transmute(vld3_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29971,20 +33805,22 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -29994,20 +33830,38 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { + let mut ret_val: poly8x16x3_t = transmute(vld3q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -30017,20 +33871,22 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - transmute(a) +pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -30040,20 +33896,26 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - transmute(a) +pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { + let mut ret_val: poly16x4x3_t = transmute(vld3_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -30063,20 +33925,22 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - transmute(a) +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -30086,411 +33950,843 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - transmute(a) +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + let mut ret_val: poly16x8x3_t = transmute(vld3q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")] + fn _vld3q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x3_t; + } + _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] + +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")] + fn _vld3q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x3_t; + } + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + let mut ret_val: float32x4x3_t = _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0i8")] + fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + } + _vld4_dup_f32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0i8")] + fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + } + let mut ret_val: float32x2x4_t = _vld4_dup_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0i8")] + fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + _vld4q_dup_f32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0i8")] + fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + let mut ret_val: float32x4x4_t = _vld4q_dup_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0i8")] + fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + _vld4_dup_s8(a as *const i8, 1) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0i8")] + fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + let mut ret_val: int8x8x4_t = _vld4_dup_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0i8")] + fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + _vld4q_dup_s8(a as *const i8, 1) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0i8")] + fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + let mut ret_val: int8x16x4_t = _vld4q_dup_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0i8")] + fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + _vld4_dup_s16(a as *const i8, 2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0i8")] + fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + let mut ret_val: int16x4x4_t = _vld4_dup_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0i8")] + fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + _vld4q_dup_s16(a as *const i8, 2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0i8")] + fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + let mut ret_val: int16x8x4_t = _vld4q_dup_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0i8")] + fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0i8")] + fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + let mut ret_val: int32x2x4_t = _vld4_dup_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0i8")] + fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as *const i8, 4) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0i8")] + fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + let mut ret_val: int32x4x4_t = _vld4q_dup_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32" + )] + fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; + } + _vld4_dup_f32(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32" + )] + fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; + } + let mut ret_val: float32x2x4_t = _vld4_dup_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32" + )] + fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; + } + _vld4q_dup_f32(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32" + )] + fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; + } + let mut ret_val: float32x4x4_t = _vld4q_dup_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8" + )] + fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; + } + _vld4_dup_s8(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8" + )] + fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; + } + let mut ret_val: int8x8x4_t = _vld4_dup_s8(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8" + )] + fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; + } + _vld4q_dup_s8(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8" + )] + fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; + } + let mut ret_val: int8x16x4_t = _vld4q_dup_s8(a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16" + )] + fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; + } + _vld4_dup_s16(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16" + )] + fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; + } + let mut ret_val: int16x4x4_t = _vld4_dup_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16" + )] + fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; + } + _vld4q_dup_s16(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16" + )] + fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; + } + let mut ret_val: int16x8x4_t = _vld4q_dup_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32" + )] + fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32" + )] + fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; + } + let mut ret_val: int32x2x4_t = _vld4_dup_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32" + )] + fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32" + )] + fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; + } + let mut ret_val: int32x4x4_t = _vld4q_dup_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64" + )] + fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; + } + _vld4_dup_s64(a as _) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30500,11 +34796,29 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - transmute(a) +pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0i8")] + fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; + } + _vld4_dup_s64(a as *const i8, 8) +} + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -30513,7 +34827,7 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30523,20 +34837,22 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - transmute(a) +pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30546,20 +34862,22 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - transmute(a) +pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30569,20 +34887,27 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - transmute(a) +pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld4_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30592,20 +34917,22 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30615,20 +34942,43 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30638,20 +34988,22 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30661,20 +35013,27 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld4_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30684,20 +35043,22 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30707,20 +35068,27 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { - transmute(a) +pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30730,20 +35098,22 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { - transmute(a) +pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_dup_s32(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30753,20 +35123,27 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { - transmute(a) +pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld4_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30776,20 +35153,22 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { - transmute(a) +pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_dup_s32(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30799,20 +35178,27 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { - transmute(a) +pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { + let mut ret_val: uint32x4x4_t = transmute(vld4q_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30822,20 +35208,22 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30845,20 +35233,27 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld4_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30868,20 +35263,22 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { + transmute(vld4q_dup_s8(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30891,20 +35288,43 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30914,20 +35334,22 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { + transmute(vld4_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30937,20 +35359,27 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { - transmute(a) +pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld4_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30960,20 +35389,22 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { - transmute(a) +pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { + transmute(vld4q_dup_s16(transmute(a))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] + +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -30983,1975 +35414,66524 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { - transmute(a) +pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2f32.p0v2f32" + )] + fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; + } + _vld4_f32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2f32.p0v2f32" + )] + fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; + } + let mut ret_val: float32x2x4_t = _vld4_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4f32.p0v4f32" + )] + fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; + } + _vld4q_f32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4f32.p0v4f32" + )] + fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; + } + let mut ret_val: float32x4x4_t = _vld4q_f32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8i8.p0v8i8" + )] + fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; + } + _vld4_s8(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8i8.p0v8i8" + )] + fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; + } + let mut ret_val: int8x8x4_t = _vld4_s8(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v16i8.p0v16i8" + )] + fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; + } + _vld4q_s8(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v16i8.p0v16i8" + )] + fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; + } + let mut ret_val: int8x16x4_t = _vld4q_s8(a as _); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i16.p0v4i16" + )] + fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; + } + _vld4_s16(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i16.p0v4i16" + )] + fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; + } + let mut ret_val: int16x4x4_t = _vld4_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8i16.p0v8i16" + )] + fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; + } + _vld4q_s16(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8i16.p0v8i16" + )] + fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; + } + let mut ret_val: int16x8x4_t = _vld4q_s16(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2i32.p0v2i32" + )] + fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; + } + _vld4_s32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2i32.p0v2i32" + )] + fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; + } + let mut ret_val: int32x2x4_t = _vld4_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i32.p0v4i32" + )] + fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; + } + _vld4q_s32(a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i32.p0v4i32" + )] + fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; + } + let mut ret_val: int32x4x4_t = _vld4q_s32(a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0i8")] + fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + } + _vld4_f32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0i8")] + fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + } + let mut ret_val: float32x2x4_t = _vld4_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0i8")] + fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + _vld4q_f32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0i8")] + fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + let mut ret_val: float32x4x4_t = _vld4q_f32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0i8")] + fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + _vld4_s8(a as *const i8, 1) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0i8")] + fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + let mut ret_val: int8x8x4_t = _vld4_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0i8")] + fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + _vld4q_s8(a as *const i8, 1) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0i8")] + fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + let mut ret_val: int8x16x4_t = _vld4q_s8(a as *const i8, 1); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0i8")] + fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + _vld4_s16(a as *const i8, 2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0i8")] + fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + let mut ret_val: int16x4x4_t = _vld4_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0i8")] + fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + _vld4q_s16(a as *const i8, 2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0i8")] + fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + let mut ret_val: int16x8x4_t = _vld4q_s16(a as *const i8, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0i8")] + fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + _vld4_s32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0i8")] + fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + let mut ret_val: int32x2x4_t = _vld4_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0i8")] + fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + _vld4q_s32(a as *const i8, 4) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0i8")] + fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + let mut ret_val: int32x4x4_t = _vld4q_s32(a as *const i8, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8" + )] + fn _vld4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x4_t; + } + _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8" + )] + fn _vld4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x4_t; + } + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: float32x2x4_t = _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8" + )] + fn _vld4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x4_t; + } + _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8" + )] + fn _vld4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x4_t; + } + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: float32x4x4_t = _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8" + )] + fn _vld4_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x4_t; + } + _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8" + )] + fn _vld4_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x4_t; + } + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int8x8x4_t = _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8" + )] + fn _vld4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x4_t; + } + _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8" + )] + fn _vld4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x4_t; + } + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: int16x4x4_t = _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8" + )] + fn _vld4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x4_t; + } + _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8" + )] + fn _vld4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x4_t; + } + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int16x8x4_t = _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Vector reinterpret cast operation"] + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8" + )] + fn _vld4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x4_t; + } + _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8" + )] + fn _vld4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x4_t; + } + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: int32x2x4_t = _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8" + )] + fn _vld4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x4_t; + } + _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8" + )] + fn _vld4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x4_t; + } + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: int32x4x4_t = _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")] + fn _vld4_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x4_t; + } + _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")] + fn _vld4_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x4_t; + } + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: float32x2x4_t = _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")] + fn _vld4q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x4_t; + } + _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")] + fn _vld4q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x4_t; + } + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: float32x4x4_t = _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")] + fn _vld4_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x4_t; + } + _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")] + fn _vld4_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x4_t; + } + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int8x8x4_t = _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")] + fn _vld4_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x4_t; + } + _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")] + fn _vld4_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x4_t; + } + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: int16x4x4_t = _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")] + fn _vld4q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x4_t; + } + _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")] + fn _vld4q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x4_t; + } + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: int16x8x4_t = _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")] + fn _vld4_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x4_t; + } + _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")] + fn _vld4_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x4_t; + } + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: int32x2x4_t = _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")] + fn _vld4q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x4_t; + } + _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")] + fn _vld4q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x4_t; + } + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: int32x4x4_t = _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: uint8x8x4_t = transmute(vld4_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: uint16x4x4_t = transmute(vld4_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: uint16x8x4_t = transmute(vld4q_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4_lane_s32::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + let mut ret_val: uint32x2x4_t = transmute(vld4_lane_s32::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4q_lane_s32::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: uint32x4x4_t = transmute(vld4q_lane_s32::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: poly8x8x4_t = transmute(vld4_lane_s8::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + let mut b: poly16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + let mut ret_val: poly16x4x4_t = transmute(vld4_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let mut ret_val: poly16x8x4_t = transmute(vld4q_lane_s16::(transmute(a), transmute(b))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_s64(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v1i64.p0v1i64" + )] + fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; + } + _vld4_s64(a as _) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0i8")] + fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; + } + _vld4_s64(a as *const i8, 8) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_s64(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_s8(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld4_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_s8(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld4q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_s16(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld4_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_s16(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld4q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_s32(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld4_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_s32(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + let mut ret_val: uint32x4x4_t = transmute(vld4q_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_s8(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld4_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { + transmute(vld4q_s8(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld4q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + transmute(vld4_s16(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld4_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { + transmute(vld4q_s16(transmute(a))) +} + +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld4q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v2f32" + )] + fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vmax_f32(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v2f32" + )] + fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vmax_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v4f32" + )] + fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vmaxq_f32(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v4f32" + )] + fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vmaxq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v8i8" + )] + fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vmax_s8(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v8i8" + )] + fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vmax_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v16i8" + )] + fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vmaxq_s8(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v16i8" + )] + fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vmaxq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v4i16" + )] + fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vmax_s16(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v4i16" + )] + fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vmax_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v8i16" + )] + fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vmaxq_s16(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v8i16" + )] + fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vmaxq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v2i32" + )] + fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vmax_s32(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v2i32" + )] + fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vmax_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v4i32" + )] + fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vmaxq_s32(a, b) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smax.v4i32" + )] + fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vmaxq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v8i8" + )] + fn _vmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v8i8" + )] + fn _vmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v16i8" + )] + fn _vmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v16i8" + )] + fn _vmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v4i16" + )] + fn _vmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v4i16" + )] + fn _vmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v8i16" + )] + fn _vmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v8i16" + )] + fn _vmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v2i32" + )] + fn _vmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v2i32" + )] + fn _vmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v4i32" + )] + fn _vmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umax) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umax.v4i32" + )] + fn _vmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v2f32" + )] + fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vmaxnm_f32(a, b) +} + +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v2f32" + )] + fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vmaxnm_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v4f32" + )] + fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vmaxnmq_f32(a, b) +} + +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v4f32" + )] + fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vmaxnmq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v2f32" + )] + fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vmin_f32(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v2f32" + )] + fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vmin_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v4f32" + )] + fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vminq_f32(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v4f32" + )] + fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vminq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v8i8" + )] + fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vmin_s8(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v8i8" + )] + fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vmin_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v16i8" + )] + fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vminq_s8(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v16i8" + )] + fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vminq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v4i16" + )] + fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vmin_s16(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v4i16" + )] + fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vmin_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v8i16" + )] + fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vminq_s16(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v8i16" + )] + fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vminq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v2i32" + )] + fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vmin_s32(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v2i32" + )] + fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vmin_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v4i32" + )] + fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vminq_s32(a, b) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smin.v4i32" + )] + fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vminq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v8i8" + )] + fn _vmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v8i8" + )] + fn _vmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v16i8" + )] + fn _vminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v16i8" + )] + fn _vminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v4i16" + )] + fn _vmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v4i16" + )] + fn _vmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v8i16" + )] + fn _vminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v8i16" + )] + fn _vminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v2i32" + )] + fn _vmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v2i32" + )] + fn _vmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v4i32" + )] + fn _vminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umin) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v4i32" + )] + fn _vminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fminnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v2f32" + )] + fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vminnm_f32(a, b) +} + +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fminnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v2f32" + )] + fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vminnm_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fminnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v4f32" + )] + fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vminnmq_f32(a, b) +} + +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fminnm) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v4f32" + )] + fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vminnmq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x2_t = vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x4_t = vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x4_t = vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint16x4_t = vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x4_t = vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x8_t = vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint16x8_t = vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x2_t = vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_lane_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint32x2_t = vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x2_t = vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_laneq_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x2_t = vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x4_t = vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_lane_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint32x4_t = vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmla_f32(a, b, vdup_n_f32(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = vmla_f32(a, b, vdup_n_f32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlaq_f32(a, b, vdupq_n_f32(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = vmlaq_f32(a, b, vdupq_n_f32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmla_s16(a, b, vdup_n_s16(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = vmla_s16(a, b, vdup_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + vmlaq_s16(a, b, vdupq_n_s16(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vmlaq_s16(a, b, vdupq_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + vmla_u16(a, b, vdup_n_u16(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = vmla_u16(a, b, vdup_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + vmlaq_u16(a, b, vdupq_n_u16(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = vmlaq_u16(a, b, vdupq_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + vmla_s32(a, b, vdup_n_s32(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = vmla_s32(a, b, vdup_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + vmlaq_s32(a, b, vdupq_n_s32(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlaq_s32(a, b, vdupq_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + vmla_u32(a, b, vdup_n_u32(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = vmla_u32(a, b, vdup_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + vmlaq_u32(a, b, vdupq_n_u32(c)) +} + +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlaq_u32(a, b, vdupq_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x2_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint32x2_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + simd_add(a, simd_mul(b, c)) +} + +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mla) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_add(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlal_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmlal_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlal_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlal_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_lane_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_laneq_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vmlal_s16(a, b, vdup_n_s16(c)) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlal_s16(a, b, vdup_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vmlal_s32(a, b, vdup_n_s32(c)) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vmlal_s32(a, b, vdup_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + vmlal_u16(a, b, vdup_n_u16(c)) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlal_u16(a, b, vdup_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + vmlal_u32(a, b, vdup_n_u32(c)) +} + +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = vmlal_u32(a, b, vdup_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + simd_add(a, vmull_s8(b, c)) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_add(a, vmull_s8(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + simd_add(a, vmull_s16(b, c)) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_add(a, vmull_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + simd_add(a, vmull_s32(b, c)) +} + +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = simd_add(a, vmull_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + simd_add(a, vmull_u8(b, c)) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_add(a, vmull_u8(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + simd_add(a, vmull_u16(b, c)) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_add(a, vmull_u16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + simd_add(a, vmull_u32(b, c)) +} + +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = simd_add(a, vmull_u32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x2_t = vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x2_t = vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: float32x4_t = vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: float32x4_t = vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x4_t = vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint16x4_t = vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x4_t = vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x8_t = vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint16x8_t = vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x2_t = vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint32x2_t = vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x2_t = vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x2_t = vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlsq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x4_t = vmlsq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlsq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint32x4_t = vmlsq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlsq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlsq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmls_f32(a, b, vdup_n_f32(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = vmls_f32(a, b, vdup_n_f32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlsq_f32(a, b, vdupq_n_f32(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = vmlsq_f32(a, b, vdupq_n_f32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmls_s16(a, b, vdup_n_s16(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = vmls_s16(a, b, vdup_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + vmlsq_s16(a, b, vdupq_n_s16(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vmlsq_s16(a, b, vdupq_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + vmls_u16(a, b, vdup_n_u16(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = vmls_u16(a, b, vdup_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + vmlsq_u16(a, b, vdupq_n_u16(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = vmlsq_u16(a, b, vdupq_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + vmls_s32(a, b, vdup_n_s32(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = vmls_s32(a, b, vdup_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + vmlsq_s32(a, b, vdupq_n_s32(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlsq_s32(a, b, vdupq_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + vmls_u32(a, b, vdup_n_u32(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = vmls_u32(a, b, vdup_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + vmlsq_u32(a, b, vdupq_n_u32(c)) +} + +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlsq_u32(a, b, vdupq_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x2_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint32x2_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + simd_sub(a, simd_mul(b, c)) +} + +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_sub(a, simd_mul(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlsl_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmlsl_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_lane_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_laneq_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vmlsl_s16(a, b, vdup_n_s16(c)) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmlsl_s16(a, b, vdup_n_s16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vmlsl_s32(a, b, vdup_n_s32(c)) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vmlsl_s32(a, b, vdup_n_s32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + vmlsl_u16(a, b, vdup_n_u16(c)) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmlsl_u16(a, b, vdup_n_u16(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + vmlsl_u32(a, b, vdup_n_u32(c)) +} + +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = vmlsl_u32(a, b, vdup_n_u32(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + simd_sub(a, vmull_s8(b, c)) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_sub(a, vmull_s8(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + simd_sub(a, vmull_s16(b, c)) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_sub(a, vmull_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + simd_sub(a, vmull_s32(b, c)) +} + +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = simd_sub(a, vmull_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + simd_sub(a, vmull_u8(b, c)) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_sub(a, vmull_u8(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + simd_sub(a, vmull_u16(b, c)) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_sub(a, vmull_u16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + simd_sub(a, vmull_u32(b, c)) +} + +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: uint64x2_t = simd_sub(a, vmull_u32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smmla) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] + fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vmmlaq_s32(a, b, c) +} + +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smmla) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] + fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int32x4_t = _vmmlaq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ummla) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] + fn _vmmlaq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +} + +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ummla) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] + fn _vmmlaq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint32x4_t = + _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x8_t = simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x8_t = simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { + simd_mul(a, vdup_n_f32(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_mul(a, vdup_n_f32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { + simd_mul(a, vdupq_n_f32(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_mul(a, vdupq_n_f32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + simd_mul(a, vdup_n_s16(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_mul(a, vdup_n_s16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + simd_mul(a, vdupq_n_s16(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_mul(a, vdupq_n_s16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + simd_mul(a, vdup_n_s32(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = simd_mul(a, vdup_n_s32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + simd_mul(a, vdupq_n_s32(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_mul(a, vdupq_n_s32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { + simd_mul(a, vdup_n_u16(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_mul(a, vdup_n_u16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { + simd_mul(a, vdupq_n_u16(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_mul(a, vdupq_n_u16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { + simd_mul(a, vdup_n_u32(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = simd_mul(a, vdup_n_u32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { + simd_mul(a, vdupq_n_u32(b)) +} + +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_mul(a, vdupq_n_u32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(pmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmul.v8i8" + )] + fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; + } + _vmul_p8(a, b) +} + +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(pmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmul.v8i8" + )] + fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; + } + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = _vmul_p8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(pmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmul.v16i8" + )] + fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; + } + _vmulq_p8(a, b) +} + +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(pmul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmul.v16i8" + )] + fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; + } + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = _vmulq_p8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_mul(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_mul(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_mul(a, b) +} + +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mul) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_mul(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_s16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmull_s16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_s16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = vmull_s16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int64x2_t = vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_u16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmull_u16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_u16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = vmull_u16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} + +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint64x2_t = vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + vmull_s16(a, vdup_n_s16(b)) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = vmull_s16(a, vdup_n_s16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + vmull_s32(a, vdup_n_s32(b)) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = vmull_s32(a, vdup_n_s32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { + vmull_u16(a, vdup_n_u16(b)) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vmull_u16(a, vdup_n_u16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { + vmull_u32(a, vdup_n_u32(b)) +} + +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = vmull_u32(a, vdup_n_u32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(pmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i8")] + fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; + } + _vmull_p8(a, b) +} + +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(pmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i8")] + fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; + } + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = _vmull_p8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")] + fn _vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + _vmull_s16(a, b) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")] + fn _vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")] + fn _vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + _vmull_s32(a, b) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")] + fn _vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")] + fn _vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; + } + _vmull_s8(a, b) +} + +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")] + fn _vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vmull_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] + fn _vmull_u8(a: int8x8_t, b: int8x8_t) -> int16x8_t; + } + _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] + fn _vmull_u8(a: int8x8_t, b: int8x8_t) -> int16x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] + fn _vmull_u16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] + fn _vmull_u16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] + fn _vmull_u32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] + fn _vmull_u32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_neg(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { + simd_neg(a) +} + +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(neg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_neg(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_or(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_or(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_or(a, b) +} + +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_or(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { + let x: int16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s8(b), a); + }; + x +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x: int16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s8(b), a); + }; + let ret_val: int16x4_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let x: int16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s8(b), a); + }; + x +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let x: int16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s8(b), a); + }; + let ret_val: int16x8_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { + let x: int32x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s16(b), a); + }; + x +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let x: int32x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s16(b), a); + }; + let ret_val: int32x2_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + let x: int32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s16(b), a); + }; + x +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x: int32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s16(b), a); + }; + let ret_val: int32x4_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { + let x: int64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s32(b), a); + }; + x +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let x: int64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s32(b), a); + }; + x +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + let x: int64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s32(b), a); + }; + x +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let x: int64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s32(b), a); + }; + let ret_val: int64x2_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { + let x: uint16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u8(b), a); + }; + x +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x: uint16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u8(b), a); + }; + let ret_val: uint16x4_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + let x: uint16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u8(b), a); + }; + x +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let x: uint16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u8(b), a); + }; + let ret_val: uint16x8_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { + let x: uint32x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u16(b), a); + }; + x +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let x: uint32x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u16(b), a); + }; + let ret_val: uint32x2_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + let x: uint32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u16(b), a); + }; + x +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let x: uint32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u16(b), a); + }; + let ret_val: uint32x4_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { + let x: uint64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u32(b), a); + }; + x +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let x: uint64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u32(b), a); + }; + x +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + let x: uint64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u32(b), a); + }; + x +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uadalp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let x: uint64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u32(b), a); + }; + let ret_val: uint64x2_t = x; + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(faddp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f32" + )] + fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpadd_f32(a, b) +} + +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(faddp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f32" + )] + fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vpadd_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v8i8")] + fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpadd_s8(a, b) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v8i8")] + fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vpadd_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v4i16")] + fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpadd_s16(a, b) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v4i16")] + fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vpadd_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v2i32")] + fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpadd_s32(a, b) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v2i32")] + fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vpadd_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + transmute(vpadd_s8(transmute(a), transmute(b))) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vpadd_s8(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + transmute(vpadd_s16(transmute(a), transmute(b))) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vpadd_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + transmute(vpadd_s32(transmute(a), transmute(b))) +} + +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(addp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = transmute(vpadd_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] + fn _vpaddl_s8(a: int8x8_t) -> int16x4_t; + } + _vpaddl_s8(a) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] + fn _vpaddl_s8(a: int8x8_t) -> int16x4_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = _vpaddl_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")] + fn _vpaddlq_s8(a: int8x16_t) -> int16x8_t; + } + _vpaddlq_s8(a) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")] + fn _vpaddlq_s8(a: int8x16_t) -> int16x8_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int16x8_t = _vpaddlq_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] + fn _vpaddl_s16(a: int16x4_t) -> int32x2_t; + } + _vpaddl_s16(a) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] + fn _vpaddl_s16(a: int16x4_t) -> int32x2_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x2_t = _vpaddl_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] + fn _vpaddlq_s16(a: int16x8_t) -> int32x4_t; + } + _vpaddlq_s16(a) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] + fn _vpaddlq_s16(a: int16x8_t) -> int32x4_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x4_t = _vpaddlq_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] + fn _vpaddl_s32(a: int32x2_t) -> int64x1_t; + } + _vpaddl_s32(a) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] + fn _vpaddl_s32(a: int32x2_t) -> int64x1_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + _vpaddl_s32(a) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] + fn _vpaddlq_s32(a: int32x4_t) -> int64x2_t; + } + _vpaddlq_s32(a) +} + +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(saddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] + fn _vpaddlq_s32(a: int32x4_t) -> int64x2_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int64x2_t = _vpaddlq_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] + fn _vpaddl_u8(a: int8x8_t) -> int16x4_t; + } + _vpaddl_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] + fn _vpaddl_u8(a: int8x8_t) -> int16x4_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x4_t = _vpaddl_u8(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] + fn _vpaddlq_u8(a: int8x16_t) -> int16x8_t; + } + _vpaddlq_u8(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] + fn _vpaddlq_u8(a: int8x16_t) -> int16x8_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint16x8_t = _vpaddlq_u8(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] + fn _vpaddl_u16(a: int16x4_t) -> int32x2_t; + } + _vpaddl_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] + fn _vpaddl_u16(a: int16x4_t) -> int32x2_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x2_t = _vpaddl_u16(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] + fn _vpaddlq_u16(a: int16x8_t) -> int32x4_t; + } + _vpaddlq_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] + fn _vpaddlq_u16(a: int16x8_t) -> int32x4_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint32x4_t = _vpaddlq_u16(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] + fn _vpaddl_u32(a: int32x2_t) -> int64x1_t; + } + _vpaddl_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] + fn _vpaddl_u32(a: int32x2_t) -> int64x1_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + _vpaddl_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] + fn _vpaddlq_u32(a: int32x4_t) -> int64x2_t; + } + _vpaddlq_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uaddlp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] + fn _vpaddlq_u32(a: int32x4_t) -> int64x2_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint64x2_t = _vpaddlq_u32(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxs))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] + fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpmax_f32(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxs))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] + fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vpmax_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] + fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmax_s8(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] + fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vpmax_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] + fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmax_s16(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] + fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vpmax_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] + fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmax_s32(a, b) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(smaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] + fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vpmax_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] + fn _vpmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] + fn _vpmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] + fn _vpmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] + fn _vpmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] + fn _vpmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] + fn _vpmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmins))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] + fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpmin_f32(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmins))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] + fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vpmin_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")] + fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmin_s8(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")] + fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vpmin_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")] + fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmin_s16(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")] + fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vpmin_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")] + fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmin_s32(a, b) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")] + fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vpmin_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] + fn _vpmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] + fn _vpmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] + fn _vpmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] + fn _vpmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] + fn _vpmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uminp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] + fn _vpmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] + fn _vqabs_s8(a: int8x8_t) -> int8x8_t; + } + _vqabs_s8(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] + fn _vqabs_s8(a: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqabs_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] + fn _vqabsq_s8(a: int8x16_t) -> int8x16_t; + } + _vqabsq_s8(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] + fn _vqabsq_s8(a: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqabsq_s8(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] + fn _vqabs_s16(a: int16x4_t) -> int16x4_t; + } + _vqabs_s16(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] + fn _vqabs_s16(a: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqabs_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] + fn _vqabsq_s16(a: int16x8_t) -> int16x8_t; + } + _vqabsq_s16(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] + fn _vqabsq_s16(a: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqabsq_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] + fn _vqabs_s32(a: int32x2_t) -> int32x2_t; + } + _vqabs_s32(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] + fn _vqabs_s32(a: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vqabs_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] + fn _vqabsq_s32(a: int32x4_t) -> int32x4_t; + } + _vqabsq_s32(a) +} + +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqabs) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] + fn _vqabsq_s32(a: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqabsq_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] + fn _vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqadd_s8(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] + fn _vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqadd_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] + fn _vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqaddq_s8(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] + fn _vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqaddq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] + fn _vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqadd_s16(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] + fn _vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqadd_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] + fn _vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqaddq_s16(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] + fn _vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqaddq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] + fn _vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqadd_s32(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] + fn _vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vqadd_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] + fn _vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqaddq_s32(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] + fn _vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqaddq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v1i64")] + fn _vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqadd_s64(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] + fn _vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqaddq_s64(a, b) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] + fn _vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vqaddq_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] + fn _vqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] + fn _vqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] + fn _vqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] + fn _vqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] + fn _vqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] + fn _vqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] + fn _vqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] + fn _vqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] + fn _vqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] + fn _vqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] + fn _vqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] + fn _vqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")] + fn _vqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqadd_u64(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] + fn _vqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] + fn _vqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal, N = 2) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s32(a, vqdmull_lane_s16::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal, N = 2) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_lane_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal, N = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqaddq_s64(a, vqdmull_lane_s32::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal, N = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_lane_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vqaddq_s32(a, vqdmull_n_s16(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_n_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vqaddq_s64(a, vqdmull_n_s32(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_n_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + vqaddq_s32(a, vqdmull_s16(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + vqaddq_s64(a, vqdmull_s32(b, c)) +} + +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl, N = 2) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s32(a, vqdmull_lane_s16::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl, N = 2) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_lane_s16::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl, N = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqsubq_s64(a, vqdmull_lane_s32::(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl, N = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_lane_s32::(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vqsubq_s32(a, vqdmull_n_s16(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_n_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vqsubq_s64(a, vqdmull_n_s32(b, c)) +} + +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_n_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + vqsubq_s32(a, vqdmull_s16(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_s16(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + vqsubq_s64(a, vqdmull_s32(b, c)) +} + +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_s32(b, c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x4_t = vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x2_t = vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) +} + +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + let b: int16x4_t = vdup_n_s16(b); + vqdmulh_s16(a, b) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = vdup_n_s16(b); + let ret_val: int16x4_t = vqdmulh_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + let b: int16x8_t = vdupq_n_s16(b); + vqdmulhq_s16(a, b) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = vdupq_n_s16(b); + let ret_val: int16x8_t = vqdmulhq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + let b: int32x2_t = vdup_n_s32(b); + vqdmulh_s32(a, b) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = vdup_n_s32(b); + let ret_val: int32x2_t = vqdmulh_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + let b: int32x4_t = vdupq_n_s32(b); + vqdmulhq_s32(a, b) +} + +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = vdupq_n_s32(b); + let ret_val: int32x4_t = vqdmulhq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i16" + )] + fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqdmulh_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i16" + )] + fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqdmulh_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v8i16" + )] + fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqdmulhq_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v8i16" + )] + fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqdmulhq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v2i32" + )] + fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqdmulh_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v2i32" + )] + fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vqdmulh_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i32" + )] + fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqdmulhq_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i32" + )] + fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqdmulhq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + let ret_val: int32x4_t = vqdmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull, N = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) +} + +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull, N = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + let ret_val: int64x2_t = vqdmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + vqdmull_s16(a, vdup_n_s16(b)) +} + +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqdmull_s16(a, vdup_n_s16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + vqdmull_s32(a, vdup_n_s32(b)) +} + +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = vqdmull_s32(a, vdup_n_s32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v4i32" + )] + fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + _vqdmull_s16(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v4i32" + )] + fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqdmull_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v2i64" + )] + fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + _vqdmull_s32(a, b) +} + +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v2i64" + )] + fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vqdmull_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v8i8" + )] + fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; + } + _vqmovn_s16(a) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v8i8" + )] + fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqmovn_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v4i16" + )] + fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; + } + _vqmovn_s32(a) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v4i16" + )] + fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqmovn_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v2i32" + )] + fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; + } + _vqmovn_s64(a) +} + +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v2i32" + )] + fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vqmovn_s64(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v8i8" + )] + fn _vqmovn_u16(a: int16x8_t) -> int8x8_t; + } + _vqmovn_u16(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v8i8" + )] + fn _vqmovn_u16(a: int16x8_t) -> int8x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqmovn_u16(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v4i16" + )] + fn _vqmovn_u32(a: int32x4_t) -> int16x4_t; + } + _vqmovn_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v4i16" + )] + fn _vqmovn_u32(a: int32x4_t) -> int16x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqmovn_u32(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v2i32" + )] + fn _vqmovn_u64(a: int64x2_t) -> int32x2_t; + } + _vqmovn_u64(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqxtn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v2i32" + )] + fn _vqmovn_u64(a: int64x2_t) -> int32x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqmovn_u64(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtun) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v8i8" + )] + fn _vqmovun_s16(a: int16x8_t) -> int8x8_t; + } + _vqmovun_s16(a).as_unsigned() +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtun) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v8i8" + )] + fn _vqmovun_s16(a: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqmovun_s16(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtun) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v4i16" + )] + fn _vqmovun_s32(a: int32x4_t) -> int16x4_t; + } + _vqmovun_s32(a).as_unsigned() +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtun) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v4i16" + )] + fn _vqmovun_s32(a: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqmovun_s32(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtun) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v2i32" + )] + fn _vqmovun_s64(a: int64x2_t) -> int32x2_t; + } + _vqmovun_s64(a).as_unsigned() +} + +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtun) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v2i32" + )] + fn _vqmovun_s64(a: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqmovun_s64(a).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] + fn _vqneg_s8(a: int8x8_t) -> int8x8_t; + } + _vqneg_s8(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] + fn _vqneg_s8(a: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqneg_s8(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] + fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; + } + _vqnegq_s8(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] + fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqnegq_s8(a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] + fn _vqneg_s16(a: int16x4_t) -> int16x4_t; + } + _vqneg_s16(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] + fn _vqneg_s16(a: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqneg_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] + fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; + } + _vqnegq_s16(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] + fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqnegq_s16(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] + fn _vqneg_s32(a: int32x2_t) -> int32x2_t; + } + _vqneg_s32(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] + fn _vqneg_s32(a: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vqneg_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] + fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; + } + _vqnegq_s32(a) +} + +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqneg) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] + fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqnegq_s32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulh_s16(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int16x4_t = vqrdmulh_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); + vqrdmulh_s32(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vqrdmulh_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulh_s16(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int16x4_t = vqrdmulh_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); + vqrdmulh_s32(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vqrdmulh_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let b: int16x8_t = simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmulhq_s16(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: int16x8_t = simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + let ret_val: int16x8_t = vqrdmulhq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulhq_s32(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vqrdmulhq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: int16x8_t = simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmulhq_s16(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + let ret_val: int16x8_t = vqrdmulhq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulhq_s32(a, b) +} + +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh, LANE = 1) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vqrdmulhq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + vqrdmulh_s16(a, vdup_n_s16(b)) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = vqrdmulh_s16(a, vdup_n_s16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + vqrdmulhq_s16(a, vdupq_n_s16(b)) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vqrdmulhq_s16(a, vdupq_n_s16(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + vqrdmulh_s32(a, vdup_n_s32(b)) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = vqrdmulh_s32(a, vdup_n_s32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + vqrdmulhq_s32(a, vdupq_n_s32(b)) +} + +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqrdmulhq_s32(a, vdupq_n_s32(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v4i16" + )] + fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqrdmulh_s16(a, b) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v4i16" + )] + fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqrdmulh_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v8i16" + )] + fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqrdmulhq_s16(a, b) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v8i16" + )] + fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqrdmulhq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v2i32" + )] + fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqrdmulh_s32(a, b) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v2i32" + )] + fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vqrdmulh_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v4i32" + )] + fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqrdmulhq_s32(a, b) +} + +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrdmulh) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v4i32" + )] + fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqrdmulhq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v8i8" + )] + fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqrshl_s8(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v8i8" + )] + fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqrshl_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v16i8" + )] + fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqrshlq_s8(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v16i8" + )] + fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqrshlq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v4i16" + )] + fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqrshl_s16(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v4i16" + )] + fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqrshl_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v8i16" + )] + fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqrshlq_s16(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v8i16" + )] + fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqrshlq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v2i32" + )] + fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqrshl_s32(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v2i32" + )] + fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vqrshl_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v4i32" + )] + fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqrshlq_s32(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v4i32" + )] + fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqrshlq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v1i64" + )] + fn _vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqrshl_s64(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v2i64" + )] + fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqrshlq_s64(a, b) +} + +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v2i64" + )] + fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vqrshlq_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v8i8" + )] + fn _vqrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqrshl_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v8i8" + )] + fn _vqrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqrshl_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v16i8" + )] + fn _vqrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqrshlq_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v16i8" + )] + fn _vqrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vqrshlq_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i16" + )] + fn _vqrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqrshl_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i16" + )] + fn _vqrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqrshl_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v8i16" + )] + fn _vqrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqrshlq_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v8i16" + )] + fn _vqrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vqrshlq_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i32" + )] + fn _vqrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqrshl_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i32" + )] + fn _vqrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vqrshl_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i32" + )] + fn _vqrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqrshlq_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i32" + )] + fn _vqrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vqrshlq_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v1i64" + )] + fn _vqrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqrshl_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i64" + )] + fn _vqrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqrshlq_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqrshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i64" + )] + fn _vqrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vqrshlq_u64(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] + fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqrshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] + fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqrshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] + fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqrshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] + fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqrshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] + fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] + fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v8i8" + )] + fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqrshrn_n_s16(a, N) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v8i8" + )] + fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqrshrn_n_s16(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v4i16" + )] + fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqrshrn_n_s32(a, N) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v4i16" + )] + fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqrshrn_n_s32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v2i32" + )] + fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqrshrn_n_s64(a, N) +} + +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v2i32" + )] + fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vqrshrn_n_s64(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] + fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqrshrn_n_u16( + a.as_signed(), + const { + uint16x8_t([ + -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, + -N as u16, + ]) + } + .as_signed(), + ) + .as_unsigned() +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] + fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqrshrn_n_u16( + a.as_signed(), + const { + uint16x8_t([ + -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, + -N as u16, + ]) + } + .as_signed(), + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] + fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqrshrn_n_u32( + a.as_signed(), + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + ) + .as_unsigned() +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] + fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqrshrn_n_u32( + a.as_signed(), + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] + fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqrshrn_n_u64( + a.as_signed(), + const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), + ) + .as_unsigned() +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] + fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqrshrn_n_u64( + a.as_signed(), + const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v8i8" + )] + fn _vqrshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqrshrn_n_u16(a.as_signed(), N).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v8i8" + )] + fn _vqrshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqrshrn_n_u16(a.as_signed(), N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v4i16" + )] + fn _vqrshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqrshrn_n_u32(a.as_signed(), N).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v4i16" + )] + fn _vqrshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqrshrn_n_u32(a.as_signed(), N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v2i32" + )] + fn _vqrshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqrshrn_n_u64(a.as_signed(), N).as_unsigned() +} + +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v2i32" + )] + fn _vqrshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqrshrn_n_u64(a.as_signed(), N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] + fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqrshrun_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] + fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqrshrun_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] + fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqrshrun_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] + fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqrshrun_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] + fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] + fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = + _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v8i8" + )] + fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqrshrun_n_s16(a, N).as_unsigned() +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v8i8" + )] + fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqrshrun_n_s16(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v4i16" + )] + fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqrshrun_n_s32(a, N).as_unsigned() +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v4i16" + )] + fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqrshrun_n_s32(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v2i32" + )] + fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqrshrun_n_s64(a, N).as_unsigned() +} + +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v2i32" + )] + fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqrshrun_n_s64(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + vqshl_s8(a, vdup_n_s8(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vqshl_s8(a, vdup_n_s8(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + vqshlq_s8(a, vdupq_n_s8(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vqshlq_s8(a, vdupq_n_s8(N as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + vqshl_s16(a, vdup_n_s16(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = vqshl_s16(a, vdup_n_s16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + vqshlq_s16(a, vdupq_n_s16(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vqshlq_s16(a, vdupq_n_s16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + vqshl_s32(a, vdup_n_s32(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = vqshl_s32(a, vdup_n_s32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + vqshlq_s32(a, vdupq_n_s32(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = vqshlq_s32(a, vdupq_n_s32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { + static_assert_uimm_bits!(N, 6); + vqshl_s64(a, vdup_n_s64(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + vqshlq_s64(a, vdupq_n_s64(N as _)) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = vqshlq_s64(a, vdupq_n_s64(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + vqshl_u8(a, vdup_n_s8(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = vqshl_u8(a, vdup_n_s8(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + vqshlq_u8(a, vdupq_n_s8(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = vqshlq_u8(a, vdupq_n_s8(N as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + vqshl_u16(a, vdup_n_s16(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = vqshl_u16(a, vdup_n_s16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + vqshlq_u16(a, vdupq_n_s16(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = vqshlq_u16(a, vdupq_n_s16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + vqshl_u32(a, vdup_n_s32(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = vqshl_u32(a, vdup_n_s32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + vqshlq_u32(a, vdupq_n_s32(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vqshlq_u32(a, vdupq_n_s32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + vqshl_u64(a, vdup_n_s64(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + vqshlq_u64(a, vdupq_n_s64(N as _)) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = vqshlq_u64(a, vdupq_n_s64(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i8" + )] + fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqshl_s8(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i8" + )] + fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqshl_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v16i8" + )] + fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqshlq_s8(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v16i8" + )] + fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqshlq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i16" + )] + fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqshl_s16(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i16" + )] + fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqshl_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i16" + )] + fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqshlq_s16(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i16" + )] + fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqshlq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i32" + )] + fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqshl_s32(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i32" + )] + fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vqshl_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i32" + )] + fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqshlq_s32(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i32" + )] + fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqshlq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v1i64" + )] + fn _vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqshl_s64(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i64" + )] + fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqshlq_s64(a, b) +} + +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i64" + )] + fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vqshlq_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i8" + )] + fn _vqshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqshl_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i8" + )] + fn _vqshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqshl_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v16i8" + )] + fn _vqshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqshlq_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v16i8" + )] + fn _vqshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vqshlq_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i16" + )] + fn _vqshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqshl_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i16" + )] + fn _vqshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqshl_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i16" + )] + fn _vqshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqshlq_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i16" + )] + fn _vqshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vqshlq_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i32" + )] + fn _vqshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqshl_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i32" + )] + fn _vqshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vqshl_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i32" + )] + fn _vqshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqshlq_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i32" + )] + fn _vqshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vqshlq_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v1i64" + )] + fn _vqshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqshl_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i64" + )] + fn _vqshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqshlq_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i64" + )] + fn _vqshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vqshlq_u64(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + } + _vqshlu_n_s8( + a, + const { + int8x8_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqshlu_n_s8( + a, + const { + int8x8_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + } + _vqshluq_n_s8( + a, + const { + int8x16_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vqshluq_n_s8( + a, + const { + int8x16_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + } + _vqshlu_n_s16( + a, + const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqshlu_n_s16( + a, + const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + } + _vqshluq_n_s16( + a, + const { + int16x8_t([ + N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vqshluq_n_s16( + a, + const { + int16x8_t([ + N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + } + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + } + _vqshluq_n_s32( + a, + const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vqshluq_n_s32( + a, + const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] + fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; + } + _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + } + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i8" + )] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + } + _vqshlu_n_s8( + a, + const { + int8x8_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i8" + )] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqshlu_n_s8( + a, + const { + int8x8_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v16i8" + )] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + } + _vqshluq_n_s8( + a, + const { + int8x16_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v16i8" + )] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vqshluq_n_s8( + a, + const { + int8x16_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i16" + )] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + } + _vqshlu_n_s16( + a, + const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i16" + )] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqshlu_n_s16( + a, + const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i16" + )] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + } + _vqshluq_n_s16( + a, + const { + int16x8_t([ + N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i16" + )] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vqshluq_n_s16( + a, + const { + int16x8_t([ + N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i32" + )] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + } + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i32" + )] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i32" + )] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + } + _vqshluq_n_s32( + a, + const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i32" + )] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vqshluq_n_s32( + a, + const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v1i64" + )] + fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; + } + _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i64" + )] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + } + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() +} + +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i64" + )] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] + fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] + fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] + fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] + fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] + fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] + fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v8i8" + )] + fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqshrn_n_s16(a, N) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v8i8" + )] + fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqshrn_n_s16(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v4i16" + )] + fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqshrn_n_s32(a, N) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v4i16" + )] + fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqshrn_n_s32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v2i32" + )] + fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqshrn_n_s64(a, N) +} + +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v2i32" + )] + fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vqshrn_n_s64(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] + fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqshrn_n_u16( + a.as_signed(), + const { + uint16x8_t([ + -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, + -N as u16, + ]) + } + .as_signed(), + ) + .as_unsigned() +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] + fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqshrn_n_u16( + a.as_signed(), + const { + uint16x8_t([ + -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, + -N as u16, + ]) + } + .as_signed(), + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] + fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqshrn_n_u32( + a.as_signed(), + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + ) + .as_unsigned() +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] + fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqshrn_n_u32( + a.as_signed(), + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] + fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqshrn_n_u64( + a.as_signed(), + const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), + ) + .as_unsigned() +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] + fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqshrn_n_u64( + a.as_signed(), + const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v8i8" + )] + fn _vqshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqshrn_n_u16(a.as_signed(), N).as_unsigned() +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v8i8" + )] + fn _vqshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqshrn_n_u16(a.as_signed(), N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v4i16" + )] + fn _vqshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqshrn_n_u32(a.as_signed(), N).as_unsigned() +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v4i16" + )] + fn _vqshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqshrn_n_u32(a.as_signed(), N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v2i32" + )] + fn _vqshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqshrn_n_u64(a.as_signed(), N).as_unsigned() +} + +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v2i32" + )] + fn _vqshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqshrn_n_u64(a.as_signed(), N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] + fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqshrun_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] + fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqshrun_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] + fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqshrun_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) + .as_unsigned() +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] + fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqshrun_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) + .as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] + fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] + fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = + _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v8i8" + )] + fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqshrun_n_s16(a, N).as_unsigned() +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v8i8" + )] + fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqshrun_n_s16(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v4i16" + )] + fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqshrun_n_s32(a, N).as_unsigned() +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v4i16" + )] + fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqshrun_n_s32(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v2i32" + )] + fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqshrun_n_s64(a, N).as_unsigned() +} + +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v2i32" + )] + fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vqshrun_n_s64(a, N).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i8")] + fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqsub_s8(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i8")] + fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vqsub_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.16i8")] + fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqsubq_s8(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.16i8")] + fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vqsubq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i16")] + fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqsub_s16(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i16")] + fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vqsub_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i16")] + fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqsubq_s16(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i16")] + fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vqsubq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i32")] + fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqsub_s32(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i32")] + fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vqsub_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i32")] + fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqsubq_s32(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i32")] + fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vqsubq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.1i64")] + fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqsub_s64(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i64")] + fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqsubq_s64(a, b) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i64")] + fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vqsubq_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i8")] + fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i8")] + fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.16i8")] + fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.16i8")] + fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i16")] + fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i16")] + fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i16")] + fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i16")] + fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i32")] + fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i32")] + fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i32")] + fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i32")] + fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.1i64")] + fn _vqsub_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqsub_u64(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i64")] + fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i64")] + fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let x = vraddhn_s16(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let x = vraddhn_s16(b, c); + let ret_val: int8x16_t = + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let x = vraddhn_s32(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let x = vraddhn_s32(b, c); + let ret_val: int16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let x = vraddhn_s64(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); + let x = vraddhn_s64(b, c); + let ret_val: int32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); + let ret_val: uint8x16_t = + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); + let ret_val: uint16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3]) +} + +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); + let ret_val: uint32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i16")] + fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + } + _vraddhn_s16(a, b) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i16")] + fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vraddhn_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i32")] + fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + } + _vraddhn_s32(a, b) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i32")] + fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vraddhn_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i64")] + fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + } + _vraddhn_s64(a, b) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i64")] + fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vraddhn_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + transmute(vraddhn_s16(transmute(a), transmute(b))) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vraddhn_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + transmute(vraddhn_s32(transmute(a), transmute(b))) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vraddhn_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + transmute(vraddhn_s64(transmute(a), transmute(b))) +} + +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(raddhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = transmute(vraddhn_s64(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v2f32" + )] + fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; + } + _vrecpe_f32(a) +} + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v2f32" + )] + fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrecpe_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v4f32" + )] + fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; + } + _vrecpeq_f32(a) +} + +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v4f32" + )] + fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrecpeq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urecpe.v2i32" + )] + fn _vrecpe_u32(a: int32x2_t) -> int32x2_t; + } + _vrecpe_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urecpe.v2i32" + )] + fn _vrecpe_u32(a: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vrecpe_u32(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urecpe.v4i32" + )] + fn _vrecpeq_u32(a: int32x4_t) -> int32x4_t; + } + _vrecpeq_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urecpe) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urecpe.v4i32" + )] + fn _vrecpeq_u32(a: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vrecpeq_u32(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecps) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v2f32" + )] + fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vrecps_f32(a, b) +} + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecps) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v2f32" + )] + fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vrecps_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecps) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v4f32" + )] + fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vrecpsq_f32(a, b) +} + +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frecps) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v4f32" + )] + fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrecpsq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { + transmute(a) +} + +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] + fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vrhadd_s8(a, b) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] + fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vrhadd_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] + fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vrhaddq_s8(a, b) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] + fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vrhaddq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] + fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vrhadd_s16(a, b) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] + fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vrhadd_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] + fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vrhaddq_s16(a, b) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] + fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vrhaddq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] + fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vrhadd_s32(a, b) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] + fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vrhadd_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] + fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vrhaddq_s32(a, b) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] + fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vrhaddq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] + fn _vrhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] + fn _vrhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] + fn _vrhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] + fn _vrhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] + fn _vrhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] + fn _vrhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] + fn _vrhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] + fn _vrhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] + fn _vrhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] + fn _vrhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] + fn _vrhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} + +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] + fn _vrhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frintn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frintn.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] + fn _vrndn_f32(a: float32x2_t) -> float32x2_t; + } + _vrndn_f32(a) +} + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frintn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frintn.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] + fn _vrndn_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrndn_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frintn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frintn.v4f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] + fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; + } + _vrndnq_f32(a) +} + +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frintn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frintn.v4f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] + fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrndnq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i8" + )] + fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vrshl_s8(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i8" + )] + fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vrshl_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v16i8" + )] + fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vrshlq_s8(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v16i8" + )] + fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vrshlq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i16" + )] + fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vrshl_s16(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i16" + )] + fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vrshl_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i16" + )] + fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vrshlq_s16(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i16" + )] + fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vrshlq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i32" + )] + fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vrshl_s32(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i32" + )] + fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vrshl_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i32" + )] + fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vrshlq_s32(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i32" + )] + fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vrshlq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v1i64" + )] + fn _vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vrshl_s64(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i64" + )] + fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vrshlq_s64(a, b) +} + +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i64" + )] + fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vrshlq_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i8" + )] + fn _vrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vrshl_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i8" + )] + fn _vrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vrshl_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v16i8" + )] + fn _vrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vrshlq_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v16i8" + )] + fn _vrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vrshlq_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i16" + )] + fn _vrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vrshl_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i16" + )] + fn _vrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vrshl_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i16" + )] + fn _vrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vrshlq_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i16" + )] + fn _vrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vrshlq_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i32" + )] + fn _vrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vrshl_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i32" + )] + fn _vrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vrshl_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i32" + )] + fn _vrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vrshlq_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i32" + )] + fn _vrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vrshlq_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v1i64" + )] + fn _vrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vrshl_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i64" + )] + fn _vrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vrshlq_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i64" + )] + fn _vrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vrshlq_u64(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + vrshl_s8(a, vdup_n_s8(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vrshl_s8(a, vdup_n_s8(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + vrshlq_s8(a, vdupq_n_s8(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vrshlq_s8(a, vdupq_n_s8(-N as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + vrshl_s16(a, vdup_n_s16(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = vrshl_s16(a, vdup_n_s16(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + vrshlq_s16(a, vdupq_n_s16(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vrshlq_s16(a, vdupq_n_s16(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + vrshl_s32(a, vdup_n_s32(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = vrshl_s32(a, vdup_n_s32(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + vrshlq_s32(a, vdupq_n_s32(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = vrshlq_s32(a, vdupq_n_s32(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + vrshl_s64(a, vdup_n_s64(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + vrshlq_s64(a, vdupq_n_s64(-N as _)) +} + +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = vrshlq_s64(a, vdupq_n_s64(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + vrshl_u8(a, vdup_n_s8(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = vrshl_u8(a, vdup_n_s8(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + vrshlq_u8(a, vdupq_n_s8(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = vrshlq_u8(a, vdupq_n_s8(-N as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + vrshl_u16(a, vdup_n_s16(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = vrshl_u16(a, vdup_n_s16(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + vrshlq_u16(a, vdupq_n_s16(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = vrshlq_u16(a, vdupq_n_s16(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + vrshl_u32(a, vdup_n_s32(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = vrshl_u32(a, vdup_n_s32(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + vrshlq_u32(a, vdupq_n_s32(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = vrshlq_u32(a, vdupq_n_s32(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + vrshl_u64(a, vdup_n_s64(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + vrshlq_u64(a, vdupq_n_s64(-N as _)) +} + +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = vrshlq_u64(a, vdupq_n_s64(-N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] + fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vrshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] + fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vrshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] + fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vrshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] + fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vrshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] + fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] + fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v8i8" + )] + fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vrshrn_n_s16(a, N) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v8i8" + )] + fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vrshrn_n_s16(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v4i16" + )] + fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vrshrn_n_s32(a, N) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v4i16" + )] + fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vrshrn_n_s32(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v2i32" + )] + fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vrshrn_n_s64(a, N) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v2i32" + )] + fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = _vrshrn_n_s64(a, N); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rshrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + transmute(vrshrn_n_s16::(transmute(a))) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rshrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = transmute(vrshrn_n_s16::(transmute(a))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rshrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + transmute(vrshrn_n_s32::(transmute(a))) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rshrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = transmute(vrshrn_n_s32::(transmute(a))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rshrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + transmute(vrshrn_n_s64::(transmute(a))) +} + +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rshrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = transmute(vrshrn_n_s64::(transmute(a))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v2f32" + )] + fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; + } + _vrsqrte_f32(a) +} + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v2f32" + )] + fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: float32x2_t = _vrsqrte_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v4f32" + )] + fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; + } + _vrsqrteq_f32(a) +} + +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v4f32" + )] + fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrsqrteq_f32(a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v2i32" + )] + fn _vrsqrte_u32(a: int32x2_t) -> int32x2_t; + } + _vrsqrte_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v2i32" + )] + fn _vrsqrte_u32(a: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = _vrsqrte_u32(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v4i32" + )] + fn _vrsqrteq_u32(a: int32x4_t) -> int32x4_t; + } + _vrsqrteq_u32(a.as_signed()).as_unsigned() +} + +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursqrte) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v4i32" + )] + fn _vrsqrteq_u32(a: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vrsqrteq_u32(a.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrts) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v2f32" + )] + fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vrsqrts_f32(a, b) +} + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrts) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v2f32" + )] + fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = _vrsqrts_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrts) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v4f32" + )] + fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vrsqrtsq_f32(a, b) +} + +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frsqrts) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v4f32" + )] + fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = _vrsqrtsq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshr_n_s8::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_add(a, vrshr_n_s8::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshrq_n_s8::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_add(a, vrshrq_n_s8::(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshr_n_s16::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_add(a, vrshr_n_s16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshrq_n_s16::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_add(a, vrshrq_n_s16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshr_n_s32::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_add(a, vrshr_n_s32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshrq_n_s32::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_add(a, vrshrq_n_s32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshr_n_s64::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshrq_n_s64::(b)) +} + +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srsra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_add(a, vrshrq_n_s64::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshr_n_u8::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_add(a, vrshr_n_u8::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshrq_n_u8::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_add(a, vrshrq_n_u8::(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshr_n_u16::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_add(a, vrshr_n_u16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshrq_n_u16::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_add(a, vrshrq_n_u16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshr_n_u32::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_add(a, vrshr_n_u32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshrq_n_u32::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_add(a, vrshrq_n_u32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshr_n_u64::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshrq_n_u64::(b)) +} + +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ursra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_add(a, vrshrq_n_u64::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v8i8" + )] + fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + } + _vrsubhn_s16(a, b) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v8i8" + )] + fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vrsubhn_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v4i16" + )] + fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + } + _vrsubhn_s32(a, b) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v4i16" + )] + fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vrsubhn_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v2i32" + )] + fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + } + _vrsubhn_s64(a, b) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v2i32" + )] + fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vrsubhn_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + transmute(vrsubhn_s16(transmute(a), transmute(b))) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vrsubhn_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + transmute(vrsubhn_s32(transmute(a), transmute(b))) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vrsubhn_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + transmute(vrsubhn_s64(transmute(a), transmute(b))) +} + +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(rsubhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = transmute(vrsubhn_s64(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(LANE, 4); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(LANE, 4); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(LANE, 4); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { + static_assert!(LANE == 0); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { + static_assert!(LANE == 0); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { + static_assert!(LANE == 0); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) +} + +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "SHA1 hash update accelerator, choose."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1c))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1c" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] + fn _vsha1cq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + } + _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() +} + +#[doc = "SHA1 hash update accelerator, choose."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1c))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1c" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] + fn _vsha1cq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + } + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA1 fixed rotate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1h))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1h" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")] + fn _vsha1h_u32(hash_e: i32) -> i32; + } + _vsha1h_u32(hash_e.as_signed()).as_unsigned() +} + +#[doc = "SHA1 hash update accelerator, majority"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1m))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1m" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] + fn _vsha1mq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + } + _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() +} + +#[doc = "SHA1 hash update accelerator, majority"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1m))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1m" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] + fn _vsha1mq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + } + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA1 hash update accelerator, parity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1p))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1p" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] + fn _vsha1pq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + } + _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() +} + +#[doc = "SHA1 hash update accelerator, parity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1p))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1p" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] + fn _vsha1pq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + } + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1su0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] + fn _vsha1su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t, w8_11: int32x4_t) -> int32x4_t; + } + _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned() +} + +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1su0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] + fn _vsha1su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t, w8_11: int32x4_t) -> int32x4_t; + } + let w0_3: uint32x4_t = simd_shuffle!(w0_3, w0_3, [0, 1, 2, 3]); + let w4_7: uint32x4_t = simd_shuffle!(w4_7, w4_7, [0, 1, 2, 3]); + let w8_11: uint32x4_t = simd_shuffle!(w8_11, w8_11, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA1 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su1))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1su1" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] + fn _vsha1su1q_u32(tw0_3: int32x4_t, w12_15: int32x4_t) -> int32x4_t; + } + _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned() +} + +#[doc = "SHA1 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su1))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1su1" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] + fn _vsha1su1q_u32(tw0_3: int32x4_t, w12_15: int32x4_t) -> int32x4_t; + } + let tw0_3: uint32x4_t = simd_shuffle!(tw0_3, tw0_3, [0, 1, 2, 3]); + let w12_15: uint32x4_t = simd_shuffle!(w12_15, w12_15, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA1 schedule update accelerator, upper part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h2))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256h2q_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, +) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256h2" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] + fn _vsha256h2q_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; + } + _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() +} + +#[doc = "SHA1 schedule update accelerator, upper part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h2))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256h2q_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, +) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256h2" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] + fn _vsha256h2q_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; + } + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); + let hash_efgh: uint32x4_t = simd_shuffle!(hash_efgh, hash_efgh, [0, 1, 2, 3]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256hq_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, +) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256h" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] + fn _vsha256hq_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; + } + _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() +} + +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256hq_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, +) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256h" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] + fn _vsha256hq_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; + } + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); + let hash_efgh: uint32x4_t = simd_shuffle!(hash_efgh, hash_efgh, [0, 1, 2, 3]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA256 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256su0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] + fn _vsha256su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t) -> int32x4_t; + } + _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned() +} + +#[doc = "SHA256 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256su0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] + fn _vsha256su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t) -> int32x4_t; + } + let w0_3: uint32x4_t = simd_shuffle!(w0_3, w0_3, [0, 1, 2, 3]); + let w4_7: uint32x4_t = simd_shuffle!(w4_7, w4_7, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "SHA256 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su1))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256su1q_u32( + tw0_3: uint32x4_t, + w8_11: uint32x4_t, + w12_15: uint32x4_t, +) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256su1" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] + fn _vsha256su1q_u32(tw0_3: int32x4_t, w8_11: int32x4_t, w12_15: int32x4_t) -> int32x4_t; + } + _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned() +} + +#[doc = "SHA256 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su1))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256su1q_u32( + tw0_3: uint32x4_t, + w8_11: uint32x4_t, + w12_15: uint32x4_t, +) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256su1" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] + fn _vsha256su1q_u32(tw0_3: int32x4_t, w8_11: int32x4_t, w12_15: int32x4_t) -> int32x4_t; + } + let tw0_3: uint32x4_t = simd_shuffle!(tw0_3, tw0_3, [0, 1, 2, 3]); + let w8_11: uint32x4_t = simd_shuffle!(w8_11, w8_11, [0, 1, 2, 3]); + let w12_15: uint32x4_t = simd_shuffle!(w12_15, w12_15, [0, 1, 2, 3]); + let ret_val: uint32x4_t = + _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] +unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] + fn _vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _vshiftins_v16i8(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] +unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] + fn _vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vshiftins_v16i8(a, b, c); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v1i64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64"))] +unsafe fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] + fn _vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; + } + _vshiftins_v1i64(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] +unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] + fn _vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + _vshiftins_v2i32(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] +unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] + fn _vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int32x2_t = _vshiftins_v2i32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64"))] +unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] + fn _vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _vshiftins_v2i64(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64"))] +unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] + fn _vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); + let ret_val: int64x2_t = _vshiftins_v2i64(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] +unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] + fn _vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + _vshiftins_v4i16(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] +unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] + fn _vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vshiftins_v4i16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] +unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] + fn _vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vshiftins_v4i32(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] +unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] + fn _vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vshiftins_v4i32(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] +unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] + fn _vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _vshiftins_v8i16(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] +unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] + fn _vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vshiftins_v8i16(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] +unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] + fn _vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + _vshiftins_v8i8(a, b, c) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] +unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] + fn _vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vshiftins_v8i8(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdup_n_s8(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_shl(a, vdup_n_s8(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdupq_n_s8(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_shl(a, vdupq_n_s8(N as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdup_n_s16(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_shl(a, vdup_n_s16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdupq_n_s16(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shl(a, vdupq_n_s16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdup_n_s32(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = simd_shl(a, vdup_n_s32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdupq_n_s32(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shl(a, vdupq_n_s32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdup_n_s64(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdupq_n_s64(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = simd_shl(a, vdupq_n_s64(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdup_n_u8(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_shl(a, vdup_n_u8(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdupq_n_u8(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_shl(a, vdupq_n_u8(N as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdup_n_u16(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_shl(a, vdup_n_u16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdupq_n_u16(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shl(a, vdupq_n_u16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdup_n_u32(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = simd_shl(a, vdup_n_u32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdupq_n_u32(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shl(a, vdupq_n_u32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdup_n_u64(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdupq_n_u64(N as _)) +} + +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = simd_shl(a, vdupq_n_u64(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v8i8" + )] + fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vshl_s8(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v8i8" + )] + fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vshl_s8(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v16i8" + )] + fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vshlq_s8(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v16i8" + )] + fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = _vshlq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v4i16" + )] + fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vshl_s16(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v4i16" + )] + fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = _vshl_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v8i16" + )] + fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vshlq_s16(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v8i16" + )] + fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = _vshlq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v2i32" + )] + fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vshl_s32(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v2i32" + )] + fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = _vshl_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v4i32" + )] + fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vshlq_s32(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v4i32" + )] + fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = _vshlq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v1i64" + )] + fn _vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vshl_s64(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v2i64" + )] + fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vshlq_s64(a, b) +} + +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sshl.v2i64" + )] + fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = _vshlq_s64(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v8i8" + )] + fn _vshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vshl_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v8i8" + )] + fn _vshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = _vshl_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v16i8" + )] + fn _vshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vshlq_u8(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v16i8" + )] + fn _vshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = _vshlq_u8(a.as_signed(), b).as_unsigned(); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i16" + )] + fn _vshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vshl_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i16" + )] + fn _vshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = _vshl_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v8i16" + )] + fn _vshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vshlq_u16(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v8i16" + )] + fn _vshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = _vshlq_u16(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i32" + )] + fn _vshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vshl_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i32" + )] + fn _vshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = _vshl_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i32" + )] + fn _vshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vshlq_u32(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i32" + )] + fn _vshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = _vshlq_u32(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v1i64" + )] + fn _vshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vshl_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i64" + )] + fn _vshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vshlq_u64(a.as_signed(), b).as_unsigned() +} + +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i64" + )] + fn _vshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = _vshlq_u64(a.as_signed(), b).as_unsigned(); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 16); + simd_shl(simd_cast(a), vdupq_n_s32(N as _)) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_shl(simd_cast(a), vdupq_n_s32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 32); + simd_shl(simd_cast(a), vdupq_n_s64(N as _)) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int64x2_t = simd_shl(simd_cast(a), vdupq_n_s64(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { + static_assert!(N >= 0 && N <= 8); + simd_shl(simd_cast(a), vdupq_n_s16(N as _)) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { + static_assert!(N >= 0 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_shl(simd_cast(a), vdupq_n_s16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 16); + simd_shl(simd_cast(a), vdupq_n_u32(N as _)) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_shl(simd_cast(a), vdupq_n_u32(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 32); + simd_shl(simd_cast(a), vdupq_n_u64(N as _)) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint64x2_t = simd_shl(simd_cast(a), vdupq_n_u64(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { + static_assert!(N >= 0 && N <= 8); + simd_shl(simd_cast(a), vdupq_n_u16(N as _)) +} + +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushll, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { + static_assert!(N >= 0 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_shl(simd_cast(a), vdupq_n_u16(N as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { 7 } else { N }; + simd_shr(a, vdup_n_s8(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let n: i32 = if N == 8 { 7 } else { N }; + let ret_val: int8x8_t = simd_shr(a, vdup_n_s8(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { 7 } else { N }; + simd_shr(a, vdupq_n_s8(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let n: i32 = if N == 8 { 7 } else { N }; + let ret_val: int8x16_t = simd_shr(a, vdupq_n_s8(n as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { 15 } else { N }; + simd_shr(a, vdup_n_s16(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let n: i32 = if N == 16 { 15 } else { N }; + let ret_val: int16x4_t = simd_shr(a, vdup_n_s16(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { 15 } else { N }; + simd_shr(a, vdupq_n_s16(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let n: i32 = if N == 16 { 15 } else { N }; + let ret_val: int16x8_t = simd_shr(a, vdupq_n_s16(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { 31 } else { N }; + simd_shr(a, vdup_n_s32(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let n: i32 = if N == 32 { 31 } else { N }; + let ret_val: int32x2_t = simd_shr(a, vdup_n_s32(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { 31 } else { N }; + simd_shr(a, vdupq_n_s32(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let n: i32 = if N == 32 { 31 } else { N }; + let ret_val: int32x4_t = simd_shr(a, vdupq_n_s32(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { 63 } else { N }; + simd_shr(a, vdup_n_s64(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { 63 } else { N }; + simd_shr(a, vdupq_n_s64(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let n: i32 = if N == 64 { 63 } else { N }; + let ret_val: int64x2_t = simd_shr(a, vdupq_n_s64(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { + return vdup_n_u8(0); + } else { + N + }; + simd_shr(a, vdup_n_u8(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let n: i32 = if N == 8 { + return vdup_n_u8(0); + } else { + N + }; + let ret_val: uint8x8_t = simd_shr(a, vdup_n_u8(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { + return vdupq_n_u8(0); + } else { + N + }; + simd_shr(a, vdupq_n_u8(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let n: i32 = if N == 8 { + return vdupq_n_u8(0); + } else { + N + }; + let ret_val: uint8x16_t = simd_shr(a, vdupq_n_u8(n as _)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { + return vdup_n_u16(0); + } else { + N + }; + simd_shr(a, vdup_n_u16(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let n: i32 = if N == 16 { + return vdup_n_u16(0); + } else { + N + }; + let ret_val: uint16x4_t = simd_shr(a, vdup_n_u16(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { + return vdupq_n_u16(0); + } else { + N + }; + simd_shr(a, vdupq_n_u16(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let n: i32 = if N == 16 { + return vdupq_n_u16(0); + } else { + N + }; + let ret_val: uint16x8_t = simd_shr(a, vdupq_n_u16(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { + return vdup_n_u32(0); + } else { + N + }; + simd_shr(a, vdup_n_u32(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let n: i32 = if N == 32 { + return vdup_n_u32(0); + } else { + N + }; + let ret_val: uint32x2_t = simd_shr(a, vdup_n_u32(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { + return vdupq_n_u32(0); + } else { + N + }; + simd_shr(a, vdupq_n_u32(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let n: i32 = if N == 32 { + return vdupq_n_u32(0); + } else { + N + }; + let ret_val: uint32x4_t = simd_shr(a, vdupq_n_u32(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { + return vdup_n_u64(0); + } else { + N + }; + simd_shr(a, vdup_n_u64(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { + return vdupq_n_u64(0); + } else { + N + }; + simd_shr(a, vdupq_n_u64(n as _)) +} + +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let n: i32 = if N == 64 { + return vdupq_n_u64(0); + } else { + N + }; + let ret_val: uint64x2_t = simd_shr(a, vdupq_n_u64(n as _)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_cast(simd_shr(a, vdupq_n_s16(N as _))) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_cast(simd_shr(a, vdupq_n_s16(N as _))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_cast(simd_shr(a, vdupq_n_s32(N as _))) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_cast(simd_shr(a, vdupq_n_s32(N as _))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_cast(simd_shr(a, vdupq_n_s64(N as _))) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: int32x2_t = simd_cast(simd_shr(a, vdupq_n_s64(N as _))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_cast(simd_shr(a, vdupq_n_u16(N as _))) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_cast(simd_shr(a, vdupq_n_u16(N as _))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_cast(simd_shr(a, vdupq_n_u32(N as _))) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_cast(simd_shr(a, vdupq_n_u32(N as _))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_cast(simd_shr(a, vdupq_n_u64(N as _))) +} + +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shrn, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let ret_val: uint32x2_t = simd_cast(simd_shr(a, vdupq_n_u64(N as _))); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + vshiftins_v8i8(a, b, int8x8_t::splat(N as i8)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vshiftins_v8i8(a, b, int8x8_t::splat(N as i8)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + vshiftins_v16i8(a, b, int8x16_t::splat(N as i8)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vshiftins_v16i8(a, b, int8x16_t::splat(N as i8)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + vshiftins_v4i16(a, b, int16x4_t::splat(N as i16)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = vshiftins_v4i16(a, b, int16x4_t::splat(N as i16)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + vshiftins_v8i16(a, b, int16x8_t::splat(N as i16)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vshiftins_v8i16(a, b, int16x8_t::splat(N as i16)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(0 >= N && N <= 31); + vshiftins_v2i32(a, b, int32x2_t::splat(N)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(0 >= N && N <= 31); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = vshiftins_v2i32(a, b, int32x2_t::splat(N)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(0 >= N && N <= 31); + vshiftins_v4i32(a, b, int32x4_t::splat(N)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(0 >= N && N <= 31); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vshiftins_v4i32(a, b, int32x4_t::splat(N)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(0 >= N && N <= 63); + vshiftins_v1i64(a, b, int64x1_t::splat(N as i64)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(0 >= N && N <= 63); + vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(0 >= N && N <= 63); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(N as i8), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(N as i8), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(N as i8), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(N as i8), + )); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(N as i16), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(N as i16), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(0 >= N && N <= 31); + transmute(vshiftins_v2i32( + transmute(a), + transmute(b), + int32x2_t::splat(N as i32), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(0 >= N && N <= 31); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = transmute(vshiftins_v2i32( + transmute(a), + transmute(b), + int32x2_t::splat(N as i32), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(0 >= N && N <= 31); + transmute(vshiftins_v4i32( + transmute(a), + transmute(b), + int32x4_t::splat(N as i32), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(0 >= N && N <= 31); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = transmute(vshiftins_v4i32( + transmute(a), + transmute(b), + int32x4_t::splat(N as i32), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(0 >= N && N <= 63); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(N as i64), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(0 >= N && N <= 63); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(N as i64), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(0 >= N && N <= 63); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(N as i64), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(N as i8), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(N as i8), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(N as i8), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(N as i8), + )); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(N as i16), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 4); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(N as i16), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 4); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(0 >= N && N <= 63); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(N as i64), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(0 >= N && N <= 63); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(N as i64), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(0 >= N && N <= 63); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(N as i64), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshr_n_s8::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_add(a, vshr_n_s8::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshrq_n_s8::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_add(a, vshrq_n_s8::(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshr_n_s16::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_add(a, vshr_n_s16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshrq_n_s16::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_add(a, vshrq_n_s16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshr_n_s32::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_add(a, vshr_n_s32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshrq_n_s32::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_add(a, vshrq_n_s32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshr_n_s64::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshrq_n_s64::(b)) +} + +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_add(a, vshrq_n_s64::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshr_n_u8::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_add(a, vshr_n_u8::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshrq_n_u8::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_add(a, vshrq_n_u8::(b)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshr_n_u16::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_add(a, vshr_n_u16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshrq_n_u16::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_add(a, vshrq_n_u16::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshr_n_u32::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_add(a, vshr_n_u32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshrq_n_u32::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_add(a, vshrq_n_u32::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshr_n_u64::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshrq_n_u64::(b)) +} + +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_add(a, vshrq_n_u64::(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(1 <= N && N <= 8); + vshiftins_v8i8(a, b, int8x8_t::splat(-N as i8)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(1 <= N && N <= 8); + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vshiftins_v8i8(a, b, int8x8_t::splat(-N as i8)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(1 <= N && N <= 8); + vshiftins_v16i8(a, b, int8x16_t::splat(-N as i8)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(1 <= N && N <= 8); + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = vshiftins_v16i8(a, b, int8x16_t::splat(-N as i8)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(1 <= N && N <= 16); + vshiftins_v4i16(a, b, int16x4_t::splat(-N as i16)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(1 <= N && N <= 16); + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = vshiftins_v4i16(a, b, int16x4_t::splat(-N as i16)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(1 <= N && N <= 16); + vshiftins_v8i16(a, b, int16x8_t::splat(-N as i16)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(1 <= N && N <= 16); + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = vshiftins_v8i16(a, b, int16x8_t::splat(-N as i16)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(1 <= N && N <= 32); + vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(1 <= N && N <= 32); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(1 <= N && N <= 32); + vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(1 <= N && N <= 32); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(1 <= N && N <= 64); + vshiftins_v1i64(a, b, int64x1_t::splat(-N as i64)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(1 <= N && N <= 64); + vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(1 <= N && N <= 64); + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(-N as i8), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(1 <= N && N <= 8); + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(-N as i8), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(-N as i8), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(1 <= N && N <= 8); + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(-N as i8), + )); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(-N as i16), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(1 <= N && N <= 16); + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(-N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(-N as i16), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(1 <= N && N <= 16); + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(-N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(1 <= N && N <= 32); + transmute(vshiftins_v2i32( + transmute(a), + transmute(b), + int32x2_t::splat(-N), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(1 <= N && N <= 32); + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = transmute(vshiftins_v2i32( + transmute(a), + transmute(b), + int32x2_t::splat(-N), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(1 <= N && N <= 32); + transmute(vshiftins_v4i32( + transmute(a), + transmute(b), + int32x4_t::splat(-N), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(1 <= N && N <= 32); + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = transmute(vshiftins_v4i32( + transmute(a), + transmute(b), + int32x4_t::splat(-N), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(-N as i64), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(-N as i64), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(1 <= N && N <= 64); + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(-N as i64), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(-N as i8), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert!(1 <= N && N <= 8); + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly8x8_t = transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(-N as i8), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(-N as i8), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert!(1 <= N && N <= 8); + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: poly8x16_t = transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(-N as i8), + )); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(-N as i16), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert!(1 <= N && N <= 16); + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: poly16x4_t = transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(-N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(-N as i16), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert!(1 <= N && N <= 16); + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: poly16x8_t = transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(-N as i16), + )); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(-N as i64), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(-N as i64), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(1 <= N && N <= 64); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(-N as i64), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { + vst1_v2f32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + vst1_v2f32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { + vst1q_v4f32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + vst1q_v4f32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { + vst1_v8i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_v8i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { + vst1q_v16i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + vst1q_v16i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { + vst1_v4i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + vst1_v4i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { + vst1q_v8i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_v8i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { + vst1_v2i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + vst1_v2i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { + vst1q_v4i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + vst1q_v4i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { + vst1_v1i64(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { + vst1q_v2i64(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + vst1q_v2i64(ptr as *const i8, a, crate::mem::align_of::() as i32) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { + vst1_v8i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_v8i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { + vst1q_v16i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + vst1q_v16i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { + vst1_v4i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + vst1_v4i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { + vst1q_v8i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_v8i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { + vst1_v2i32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + vst1_v2i32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { + vst1q_v4i32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + vst1q_v4i32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { + vst1_v1i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { + vst1q_v2i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + vst1q_v2i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { + vst1_v8i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_v8i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { + vst1q_v16i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + vst1q_v16i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { + vst1_v4i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + vst1_v4i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { + vst1q_v8i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_v8i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { + vst1_v1i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { + vst1q_v2i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + vst1q_v2i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v2f32")] + fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); + } + _vst1_f32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v2f32")] + fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); + } + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst1_f32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v4f32")] + fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); + } + _vst1q_f32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v4f32")] + fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst1q_f32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" + )] + fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32); + } + _vst1_f32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" + )] + fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32); + } + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst1_f32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" + )] + fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32); + } + _vst1q_f32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" + )] + fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32); + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst1q_f32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v2f32")] + fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); + } + _vst1_f32_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v2f32")] + fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst1_f32_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v4f32")] + fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); + } + _vst1q_f32_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v4f32")] + fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); + } + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst1q_f32_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" + )] + fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32); + } + _vst1_f32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" + )] + fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32); + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst1_f32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" + )] + fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32); + } + _vst1q_f32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" + )] + fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32); + } + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst1q_f32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32")] + fn _vst1_f32_x4( + ptr: *mut f32, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + ); + } + _vst1_f32_x4(a, b.0, b.1, b.2, b.3) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32")] + fn _vst1_f32_x4( + ptr: *mut f32, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + ); + } + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst1_f32_x4(a, b.0, b.1, b.2, b.3) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32")] + fn _vst1q_f32_x4( + ptr: *mut f32, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + ); + } + _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32")] + fn _vst1q_f32_x4( + ptr: *mut f32, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + ); + } + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" + )] + fn _vst1_f32_x4( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + ptr: *mut f32, + ); + } + _vst1_f32_x4(b.0, b.1, b.2, b.3, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" + )] + fn _vst1_f32_x4( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + ptr: *mut f32, + ); + } + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst1_f32_x4(b.0, b.1, b.2, b.3, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" + )] + fn _vst1q_f32_x4( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + ptr: *mut f32, + ); + } + _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" + )] + fn _vst1q_f32_x4( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + ptr: *mut f32, + ); + } + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { + static_assert_uimm_bits!(LANE, 1); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { + static_assert_uimm_bits!(LANE, 2); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { + static_assert_uimm_bits!(LANE, 3); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { + static_assert_uimm_bits!(LANE, 4); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { + static_assert_uimm_bits!(LANE, 4); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { + static_assert_uimm_bits!(LANE, 2); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { + static_assert_uimm_bits!(LANE, 3); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { + static_assert_uimm_bits!(LANE, 1); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { + static_assert_uimm_bits!(LANE, 2); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { + static_assert_uimm_bits!(LANE, 1); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { + static_assert_uimm_bits!(LANE, 3); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { + static_assert_uimm_bits!(LANE, 4); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { + static_assert_uimm_bits!(LANE, 4); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { + static_assert_uimm_bits!(LANE, 2); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { + static_assert_uimm_bits!(LANE, 3); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { + static_assert_uimm_bits!(LANE, 1); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { + static_assert_uimm_bits!(LANE, 2); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { + static_assert_uimm_bits!(LANE, 1); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { + static_assert_uimm_bits!(LANE, 3); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { + static_assert_uimm_bits!(LANE, 4); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { + static_assert_uimm_bits!(LANE, 4); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { + static_assert_uimm_bits!(LANE, 2); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { + static_assert_uimm_bits!(LANE, 3); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { + vst1_s64_x2(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { + vst1_s64_x3(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { + vst1_s64_x4(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { + vst1q_s64_x2(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { + let mut b: poly64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst1q_s64_x2(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { + vst1q_s64_x3(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { + let mut b: poly64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst1q_s64_x3(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { + vst1q_s64_x4(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { + let mut b: poly64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst1q_s64_x4(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" + )] + fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + } + _vst1_s8_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" + )] + fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1_s8_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" + )] + fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8); + } + _vst1q_s8_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" + )] + fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8); + } + let mut b: int8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst1q_s8_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" + )] + fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16); + } + _vst1_s16_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" + )] + fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16); + } + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst1_s16_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" + )] + fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16); + } + _vst1q_s16_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" + )] + fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16); + } + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1q_s16_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" + )] + fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32); + } + _vst1_s32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" + )] + fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32); + } + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst1_s32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" + )] + fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32); + } + _vst1q_s32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" + )] + fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32); + } + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst1q_s32_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v1i64.p0i64" + )] + fn _vst1_s64_x2(a: int64x1_t, b: int64x1_t, ptr: *mut i64); + } + _vst1_s64_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" + )] + fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64); + } + _vst1q_s64_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" + )] + fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64); + } + let mut b: int64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst1q_s64_x2(b.0, b.1, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v8i8")] + fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); + } + _vst1_s8_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v8i8")] + fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1_s8_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v16i8")] + fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); + } + _vst1q_s8_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v16i8")] + fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); + } + let mut b: int8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst1q_s8_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v4i16")] + fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); + } + _vst1_s16_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v4i16")] + fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); + } + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst1_s16_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v8i16")] + fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); + } + _vst1q_s16_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v8i16")] + fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); + } + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1q_s16_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v2i32")] + fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); + } + _vst1_s32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v2i32")] + fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); + } + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst1_s32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v4i32")] + fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); + } + _vst1q_s32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v4i32")] + fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); + } + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst1q_s32_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v1i64")] + fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t); + } + _vst1_s64_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v2i64")] + fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); + } + _vst1q_s64_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v2i64")] + fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); + } + let mut b: int64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst1q_s64_x2(a, b.0, b.1) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" + )] + fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + } + _vst1_s8_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" + )] + fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + } + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1_s8_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" + )] + fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + } + _vst1q_s8_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" + )] + fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + } + let mut b: int8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst1q_s8_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" + )] + fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16); + } + _vst1_s16_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" + )] + fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16); + } + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst1_s16_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" + )] + fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16); + } + _vst1q_s16_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" + )] + fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16); + } + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1q_s16_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" + )] + fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32); + } + _vst1_s32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" + )] + fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32); + } + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst1_s32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" + )] + fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32); + } + _vst1q_s32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" + )] + fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32); + } + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst1q_s32_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v1i64.p0i64" + )] + fn _vst1_s64_x3(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i64); + } + _vst1_s64_x3(b.0, b.1, b.2, a) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" + )] + fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64); + } + _vst1q_s64_x3(b.0, b.1, b.2, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" + )] + fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64); + } + let mut b: int64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst1q_s64_x3(b.0, b.1, b.2, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8")] + fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); + } + _vst1_s8_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8")] + fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); + } + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1_s8_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8")] + fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); + } + _vst1q_s8_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8")] + fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); + } + let mut b: int8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst1q_s8_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16")] + fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); + } + _vst1_s16_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16")] + fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); + } + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst1_s16_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16")] + fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); + } + _vst1q_s16_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16")] + fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); + } + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1q_s16_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32")] + fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); + } + _vst1_s32_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32")] + fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); + } + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst1_s32_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32")] + fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); + } + _vst1q_s32_x3(a, b.0, b.1, b.2) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32")] + fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); + } + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst1q_s32_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64")] + fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); + } + _vst1_s64_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64")] + fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); + } + _vst1q_s64_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64")] + fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); + } + let mut b: int64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst1q_s64_x3(a, b.0, b.1, b.2) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" + )] + fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); + } + _vst1_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" + )] + fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); + } + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" + )] + fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); + } + _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" + )] + fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); + } + let mut b: int8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" + )] + fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16); + } + _vst1_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" + )] + fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16); + } + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst1_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" + )] + fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16); + } + _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" + )] + fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16); + } + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" + )] + fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32); + } + _vst1_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" + )] + fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32); + } + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst1_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" + )] + fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32); + } + _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" + )] + fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32); + } + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v1i64.p0i64" + )] + fn _vst1_s64_x4(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i64); + } + _vst1_s64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" + )] + fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64); + } + _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { - transmute(a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" + )] + fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64); + } + let mut b: int64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8")] + fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); + } + _vst1_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8")] + fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); + } + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8")] + fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); + } + _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8")] + fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); + } + let mut b: int8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16")] + fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); + } + _vst1_s16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16")] + fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); + } + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst1_s16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16")] + fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); + } + _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16")] + fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); + } + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32")] + fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); + } + _vst1_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32")] + fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); + } + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst1_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32")] + fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); + } + _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32")] + fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); + } + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64")] + fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); + } + _vst1_s64_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64")] + fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); + } + _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64")] + fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); + } + let mut b: int64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -32961,20 +101941,22 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { - transmute(a) +pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -32984,20 +101966,25 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { - transmute(a) +pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { + let mut b: uint8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33007,20 +101994,22 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33030,20 +102019,26 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { + let mut b: uint8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33053,20 +102048,22 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { - transmute(a) +pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33076,20 +102073,27 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { + let mut b: uint8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33099,20 +102103,22 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33122,20 +102128,33 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { - transmute(a) +pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { + let mut b: uint8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33145,20 +102164,22 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33168,20 +102189,38 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { + let mut b: uint8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33191,20 +102230,22 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { - transmute(a) +pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33214,20 +102255,43 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { + let mut b: uint8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33237,20 +102301,22 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { - transmute(a) +pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33260,20 +102326,25 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { + let mut b: uint16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33283,20 +102354,22 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { - transmute(a) +pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33306,20 +102379,26 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { + let mut b: uint16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33329,20 +102408,22 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33352,20 +102433,27 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { - transmute(a) +pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { + let mut b: uint16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33375,20 +102463,22 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33398,20 +102488,25 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { + let mut b: uint16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33421,20 +102516,22 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { - transmute(a) +pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33444,20 +102541,26 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { + let mut b: uint16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33467,20 +102570,22 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { - transmute(a) +pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { + vst1q_s16_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33490,20 +102595,27 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { + let mut b: uint16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_s16_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33513,20 +102625,22 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { - transmute(a) +pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { + vst1_s32_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33536,20 +102650,25 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { + let mut b: uint32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst1_s32_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33559,20 +102678,22 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { + vst1_s32_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33582,20 +102703,26 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - transmute(a) +pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { + let mut b: uint32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst1_s32_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33605,20 +102732,22 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { + vst1_s32_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33628,20 +102757,27 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - transmute(a) +pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { + let mut b: uint32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst1_s32_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33651,20 +102787,22 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - transmute(a) +pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { + vst1q_s32_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33674,20 +102812,25 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - transmute(a) +pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { + let mut b: uint32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst1q_s32_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33697,20 +102840,22 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { + vst1q_s32_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33720,20 +102865,26 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { + let mut b: uint32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst1q_s32_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33743,20 +102894,22 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { + vst1q_s32_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33766,20 +102919,26 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { + let mut b: uint32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst1q_s32_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33789,20 +102948,21 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { + vst1_s64_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33812,20 +102972,21 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - transmute(a) +pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { + vst1_s64_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33835,20 +102996,22 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - transmute(a) +pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { + vst1_s64_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33858,20 +103021,22 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - transmute(a) +pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { + vst1q_s64_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33881,20 +103046,25 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - transmute(a) +pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { + let mut b: uint64x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst1q_s64_x2(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33904,20 +103074,22 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { + vst1q_s64_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33927,20 +103099,26 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { + let mut b: uint64x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst1q_s64_x3(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33950,20 +103128,22 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { + vst1q_s64_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33973,20 +103153,27 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { + let mut b: uint64x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst1q_s64_x4(transmute(a), transmute(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -33996,20 +103183,22 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34019,28 +103208,25 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] - fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vrhadd_s8(a, b) +pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { + let mut b: poly8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34050,28 +103236,22 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] - fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vrhaddq_s8(a, b) +pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34081,28 +103261,26 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] - fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vrhadd_s16(a, b) +pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { + let mut b: poly8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34112,28 +103290,22 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] - fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vrhaddq_s16(a, b) +pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34143,28 +103315,27 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] - fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vrhadd_s32(a, b) +pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { + let mut b: poly8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34174,28 +103345,22 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] - fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vrhaddq_s32(a, b) +pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34205,28 +103370,33 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] - fn _vrhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { + let mut b: poly8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34236,28 +103406,22 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] - fn _vrhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34267,28 +103431,38 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] - fn _vrhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { + let mut b: poly8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34298,28 +103472,22 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] - fn _vrhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34329,28 +103497,43 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] - fn _vrhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { + let mut b: poly8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34360,28 +103543,22 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] - fn _vrhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34391,28 +103568,25 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v2f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] - fn _vrndn_f32(a: float32x2_t) -> float32x2_t; - } - _vrndn_f32(a) +pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { + let mut b: poly16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34422,28 +103596,22 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v4f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] - fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndnq_f32(a) +pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34453,28 +103621,26 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i8" - )] - fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vrshl_s8(a, b) +pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { + let mut b: poly16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34484,28 +103650,22 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v16i8" - )] - fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vrshlq_s8(a, b) +pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34515,28 +103675,27 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i16" - )] - fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vrshl_s16(a, b) +pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { + let mut b: poly16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34546,28 +103705,22 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i16" - )] - fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vrshlq_s16(a, b) +pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34577,28 +103730,25 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i32" - )] - fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vrshl_s32(a, b) +pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { + let mut b: poly16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34608,28 +103758,22 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i32" - )] - fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vrshlq_s32(a, b) +pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34639,28 +103783,26 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v1i64" - )] - fn _vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vrshl_s64(a, b) +pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { + let mut b: poly16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34670,28 +103812,22 @@ pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i64" - )] - fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vrshlq_s64(a, b) +pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { + vst1q_s16_x4(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] + +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -34701,29 +103837,402 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i8" - )] - fn _vrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { + let mut b: poly16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst1q_s16_x4(transmute(a), transmute(b)) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v1i64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v1i64")] + fn _vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); } - _vrshl_u8(a.as_signed(), b).as_unsigned() + _vst1_v1i64(addr, val, align) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2f32")] + fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); + } + _vst1_v2f32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2f32")] + fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); + } + let val: float32x2_t = simd_shuffle!(val, val, [0, 1]); + _vst1_v2f32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i32")] + fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); + } + _vst1_v2i32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i32")] + fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); + } + let val: int32x2_t = simd_shuffle!(val, val, [0, 1]); + _vst1_v2i32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i16")] + fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); + } + _vst1_v4i16(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i16")] + fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); + } + let val: int16x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); + _vst1_v4i16(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v8i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i8")] + fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); + } + _vst1_v8i8(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v8i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i8")] + fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); + } + let val: int8x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1_v8i8(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v16i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v16i8")] + fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); + } + _vst1q_v16i8(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v16i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v16i8")] + fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); + } + let val: int8x16_t = simd_shuffle!( + val, + val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst1q_v16i8(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v2i64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i64")] + fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); + } + _vst1q_v2i64(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v2i64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i64")] + fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); + } + let val: int64x2_t = simd_shuffle!(val, val, [0, 1]); + _vst1q_v2i64(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4f32")] + fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); + } + _vst1q_v4f32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4f32")] + fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); + } + let val: float32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); + _vst1q_v4f32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i32")] + fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); + } + _vst1q_v4i32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i32")] + fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); + } + let val: int32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); + _vst1q_v4i32(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i16")] + fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); + } + _vst1q_v8i16(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i16")] + fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); + } + let val: int16x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst1q_v8i16(addr, val, align) +} + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34732,29 +104241,25 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v16i8" - )] - fn _vrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vrshlq_u8(a.as_signed(), b).as_unsigned() +pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] + +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34763,1147 +104268,1356 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] +pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { + static_assert_uimm_bits!(LANE, 1); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + *a = simd_extract!(b, LANE as u32); +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i16" + link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" )] - fn _vrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vst2_f32(a: float32x2_t, b: float32x2_t, ptr: *mut i8); } - _vrshl_u16(a.as_signed(), b).as_unsigned() + _vst2_f32(b.0, b.1, a as _) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" + )] + fn _vst2_f32(a: float32x2_t, b: float32x2_t, ptr: *mut i8); + } + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_f32(b.0, b.1, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" + )] + fn _vst2q_f32(a: float32x4_t, b: float32x4_t, ptr: *mut i8); + } + _vst2q_f32(b.0, b.1, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" + )] + fn _vst2q_f32(a: float32x4_t, b: float32x4_t, ptr: *mut i8); + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_f32(b.0, b.1, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" + )] + fn _vst2_s8(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + } + _vst2_s8(b.0, b.1, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" + )] + fn _vst2_s8(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2_s8(b.0, b.1, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" + )] + fn _vst2q_s8(a: int8x16_t, b: int8x16_t, ptr: *mut i8); + } + _vst2q_s8(b.0, b.1, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i16" + link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" )] - fn _vrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vst2q_s8(a: int8x16_t, b: int8x16_t, ptr: *mut i8); } - _vrshlq_u16(a.as_signed(), b).as_unsigned() + let mut b: int8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst2q_s8(b.0, b.1, a as _) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i32" + link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" )] - fn _vrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vst2_s16(a: int16x4_t, b: int16x4_t, ptr: *mut i8); } - _vrshl_u32(a.as_signed(), b).as_unsigned() + _vst2_s16(b.0, b.1, a as _) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i32" + link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" )] - fn _vrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vst2_s16(a: int16x4_t, b: int16x4_t, ptr: *mut i8); } - _vrshlq_u32(a.as_signed(), b).as_unsigned() + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2_s16(b.0, b.1, a as _) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v1i64" + link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" )] - fn _vrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vst2q_s16(a: int16x8_t, b: int16x8_t, ptr: *mut i8); } - _vrshl_u64(a.as_signed(), b).as_unsigned() + _vst2q_s16(b.0, b.1, a as _) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i64" + link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" )] - fn _vrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vst2q_s16(a: int16x8_t, b: int16x8_t, ptr: *mut i8); } - _vrshlq_u64(a.as_signed(), b).as_unsigned() + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2q_s16(b.0, b.1, a as _) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - vrshl_s8(a, vdup_n_s8(-N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" + )] + fn _vst2_s32(a: int32x2_t, b: int32x2_t, ptr: *mut i8); + } + _vst2_s32(b.0, b.1, a as _) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - vrshlq_s8(a, vdupq_n_s8(-N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" + )] + fn _vst2_s32(a: int32x2_t, b: int32x2_t, ptr: *mut i8); + } + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_s32(b.0, b.1, a as _) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - vrshl_s16(a, vdup_n_s16(-N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" + )] + fn _vst2q_s32(a: int32x4_t, b: int32x4_t, ptr: *mut i8); + } + _vst2q_s32(b.0, b.1, a as _) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - vrshlq_s16(a, vdupq_n_s16(-N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" + )] + fn _vst2q_s32(a: int32x4_t, b: int32x4_t, ptr: *mut i8); + } + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_s32(b.0, b.1, a as _) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - vrshl_s32(a, vdup_n_s32(-N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2f32")] + fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); + } + _vst2_f32(a as _, b.0, b.1, 4) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - vrshlq_s32(a, vdupq_n_s32(-N as _)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2f32")] + fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); + } + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_f32(a as _, b.0, b.1, 4) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - vrshl_s64(a, vdup_n_s64(-N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4f32")] + fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); + } + _vst2q_f32(a as _, b.0, b.1, 4) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - vrshlq_s64(a, vdupq_n_s64(-N as _)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4f32")] + fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_f32(a as _, b.0, b.1, 4) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - vrshl_u8(a, vdup_n_s8(-N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i8")] + fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); + } + _vst2_s8(a as _, b.0, b.1, 1) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - vrshlq_u8(a, vdupq_n_s8(-N as _)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i8")] + fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2_s8(a as _, b.0, b.1, 1) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - vrshl_u16(a, vdup_n_s16(-N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v16i8")] + fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); + } + _vst2q_s8(a as _, b.0, b.1, 1) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v16i8")] + fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); + } + let mut b: int8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst2q_s8(a as _, b.0, b.1, 1) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - vrshlq_u16(a, vdupq_n_s16(-N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i16")] + fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); + } + _vst2_s16(a as _, b.0, b.1, 2) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - vrshl_u32(a, vdup_n_s32(-N as _)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i16")] + fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); + } + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2_s16(a as _, b.0, b.1, 2) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - vrshlq_u32(a, vdupq_n_s32(-N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i16")] + fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); + } + _vst2q_s16(a as _, b.0, b.1, 2) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - vrshl_u64(a, vdup_n_s64(-N as _)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i16")] + fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); + } + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2q_s16(a as _, b.0, b.1, 2) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - vrshlq_u64(a, vdupq_n_s64(-N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2i32")] + fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); + } + _vst2_s32(a as _, b.0, b.1, 4) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] - fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2i32")] + fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); } - _vrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_s32(a as _, b.0, b.1, 4) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] - fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i32")] + fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } - _vrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) + _vst2q_s32(a as _, b.0, b.1, 4) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] - fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i32")] + fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } - _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_s32(a as _, b.0, b.1, 4) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v8i8" + link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" )] - fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); } - _vrshrn_n_s16(a, N) + _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v4i16" + link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" )] - fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); } - _vrshrn_n_s32(a, N) + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v2i32" + link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" )] - fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); } - _vrshrn_n_s64(a, N) + _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - transmute(vrshrn_n_s16::(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" + )] + fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - transmute(vrshrn_n_s32::(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" + )] + fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); + } + _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - transmute(vrshrn_n_s64::(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" + )] + fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" + )] + fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); + } + _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" + )] + fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); + } + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" + )] + fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" + )] + fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); + } + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f32" + link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" )] - fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; + fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); } - _vrsqrte_f32(a) + _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v4f32" + link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" )] - fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; + fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); } - _vrsqrteq_f32(a) + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v2i32" + link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" )] - fn _vrsqrte_u32(a: int32x2_t) -> int32x2_t; + fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); } - _vrsqrte_u32(a.as_signed()).as_unsigned() + _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v4i32" + link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" )] - fn _vrsqrteq_u32(a: int32x4_t) -> int32x4_t; + fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); } - _vrsqrteq_u32(a.as_signed()).as_unsigned() + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v2f32" - )] - fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")] + fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } - _vrsqrts_f32(a, b) + _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v4f32" - )] - fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")] + fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } - _vrsqrtsq_f32(a, b) + let mut b: float32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) -)] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshr_n_s8::(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")] + fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); + } + _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) -)] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshrq_n_s8::(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")] + fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); + } + let mut b: float32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) -)] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshr_n_s16::(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")] + fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); + } + _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) -)] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshrq_n_s16::(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")] + fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); + } + let mut b: int8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) -)] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshr_n_s32::(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")] + fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); + } + _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) -)] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshrq_n_s32::(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")] + fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); + } + let mut b: int16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) -)] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshr_n_s64::(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")] + fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); + } + _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")] + fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); + } + let mut b: int16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")] + fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); + } + _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")] + fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); + } + let mut b: int32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")] + fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); + } + _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")] + fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); + } + let mut b: int32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) +} + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -35914,21 +105628,23 @@ pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshrq_n_s64::(b)) +pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -35939,21 +105655,26 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshr_n_u8::(b)) +pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -35964,21 +105685,23 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshrq_n_u8::(b)) +pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -35989,21 +105712,26 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshr_n_u16::(b)) +pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -36014,21 +105742,23 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshrq_n_u16::(b)) +pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -36039,21 +105769,26 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshr_n_u32::(b)) +pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -36064,21 +105799,23 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshrq_n_u32::(b)) +pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + vst2_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -36089,21 +105826,26 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshr_n_u64::(b)) +pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst2_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -36114,22 +105856,25 @@ pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshrq_n_u64::(b)) +pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36138,29 +105883,28 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v8i8" - )] - fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - } - _vrsubhn_s16(a, b) +pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst2q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36169,29 +105913,25 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v4i16" - )] - fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - } - _vrsubhn_s32(a, b) +pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36200,29 +105940,28 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v2i32" - )] - fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - } - _vrsubhn_s64(a, b) +pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36231,21 +105970,25 @@ pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - transmute(vrsubhn_s16(transmute(a), transmute(b))) +pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36254,21 +105997,28 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - transmute(vrsubhn_s32(transmute(a), transmute(b))) +pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: poly16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36277,20 +106027,23 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - transmute(vrsubhn_s64(transmute(a), transmute(b))) +pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2, LANE = 0) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -36301,23 +106054,26 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36326,73 +106082,59 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { + vst2_s64(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v1i64")] + fn _vst2_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32); + } + _vst2_s64(a as _, b.0, b.1, 8) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(b, LANE as u32, a) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v1i64.p0i8" + )] + fn _vst2_s64(a: int64x1_t, b: int64x1_t, ptr: *mut i8); + } + _vst2_s64(b.0, b.1, a as _) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36401,23 +106143,23 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { + vst2_s64(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36426,23 +106168,23 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36451,23 +106193,26 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { + let mut b: uint8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36476,23 +106221,23 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36501,23 +106246,34 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { + let mut b: uint8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36526,23 +106282,23 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36551,23 +106307,26 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { + let mut b: uint16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36576,23 +106335,23 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { + vst2q_s16(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36601,23 +106360,26 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { + let mut b: uint16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2q_s16(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36626,23 +106388,23 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { + vst2_s32(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36651,23 +106413,26 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { + let mut b: uint32x2x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + vst2_s32(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36676,23 +106441,23 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { + vst2q_s32(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36701,23 +106466,26 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { + let mut b: uint32x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst2q_s32(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36726,23 +106494,23 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36751,23 +106519,26 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { + let mut b: poly8x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36776,23 +106547,23 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36801,23 +106572,34 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { + let mut b: poly8x16x2_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36826,23 +106608,23 @@ pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36851,23 +106633,26 @@ pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { + let mut b: poly16x4x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36876,23 +106661,23 @@ pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { + vst2q_s16(transmute(a), transmute(b)) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] + +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(st2) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36901,1044 +106686,1481 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdup_n_s8(N as _)) +pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { + let mut b: poly16x8x2_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + vst2q_s16(transmute(a), transmute(b)) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] + fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); + } + _vst3_f32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] + fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_f32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] + fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); + } + _vst3q_f32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] + fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); + } + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_f32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] + fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); + } + _vst3_s8(a as _, b.0, b.1, b.2, 1) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] + fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); + } + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3_s8(a as _, b.0, b.1, b.2, 1) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] + fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); + } + _vst3q_s8(a as _, b.0, b.1, b.2, 1) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] + fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); + } + let mut b: int8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst3q_s8(a as _, b.0, b.1, b.2, 1) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] + fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); + } + _vst3_s16(a as _, b.0, b.1, b.2, 2) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] + fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); + } + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3_s16(a as _, b.0, b.1, b.2, 2) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] + fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); + } + _vst3q_s16(a as _, b.0, b.1, b.2, 2) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] + fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); + } + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3q_s16(a as _, b.0, b.1, b.2, 2) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] + fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); + } + _vst3_s32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] + fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); + } + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_s32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] + fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); + } + _vst3q_s32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] + fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); + } + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_s32(a as _, b.0, b.1, b.2, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" + )] + fn _vst3_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8); + } + _vst3_f32(b.0, b.1, b.2, a as _) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" + )] + fn _vst3_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8); + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_f32(b.0, b.1, b.2, a as _) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" + )] + fn _vst3q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8); + } + _vst3q_f32(b.0, b.1, b.2, a as _) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" + )] + fn _vst3q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8); + } + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_f32(b.0, b.1, b.2, a as _) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" + )] + fn _vst3_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + } + _vst3_s8(b.0, b.1, b.2, a as _) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdupq_n_s8(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" + )] + fn _vst3_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + } + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3_s8(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdup_n_s16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" + )] + fn _vst3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + } + _vst3q_s8(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdupq_n_s16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" + )] + fn _vst3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + } + let mut b: int8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst3q_s8(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdup_n_s32(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" + )] + fn _vst3_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8); + } + _vst3_s16(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdupq_n_s32(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" + )] + fn _vst3_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8); + } + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3_s16(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdup_n_s64(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" + )] + fn _vst3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8); + } + _vst3q_s16(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdupq_n_s64(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" + )] + fn _vst3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8); + } + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3q_s16(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdup_n_u8(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" + )] + fn _vst3_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8); + } + _vst3_s32(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdupq_n_u8(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" + )] + fn _vst3_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8); + } + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_s32(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdup_n_u16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" + )] + fn _vst3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8); + } + _vst3q_s32(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdupq_n_u16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" + )] + fn _vst3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8); + } + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_s32(b.0, b.1, b.2, a as _) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdup_n_u32(N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] + fn _vst3_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ); + } + _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdupq_n_u32(N as _)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] + fn _vst3_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ); + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdup_n_u64(N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] + fn _vst3q_lane_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ); + } + _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] + fn _vst3q_lane_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ); + } + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdupq_n_u64(N as _)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] + fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); + } + _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v8i8" - )] - fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] + fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); } - _vshl_s8(a, b) + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v16i8" - )] - fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] + fn _vst3_lane_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ); } - _vshlq_s8(a, b) + _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v4i16" - )] - fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] + fn _vst3_lane_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ); } - _vshl_s16(a, b) + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v8i16" - )] - fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] + fn _vst3q_lane_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ); } - _vshlq_s16(a, b) + _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i32" - )] - fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] + fn _vst3q_lane_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ); } - _vshl_s32(a, b) + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v4i32" - )] - fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] + fn _vst3_lane_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ); } - _vshlq_s32(a, b) + _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v1i64" - )] - fn _vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] + fn _vst3_lane_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ); } - _vshl_s64(a, b) + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i64" - )] - fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] + fn _vst3q_lane_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ); } - _vshlq_s64(a, b) + _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i8" - )] - fn _vshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] + fn _vst3q_lane_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ); } - _vshl_u8(a.as_signed(), b).as_unsigned() + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v16i8" + link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" )] - fn _vshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8); } - _vshlq_u8(a.as_signed(), b).as_unsigned() + _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" + )] + fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8); + } + let mut b: float32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i16" + link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" )] - fn _vshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8); } - _vshl_u16(a.as_signed(), b).as_unsigned() + _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i16" + link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" )] - fn _vshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8); } - _vshlq_u16(a.as_signed(), b).as_unsigned() + let mut b: float32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i32" + link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" )] - fn _vshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8); } - _vshl_u32(a.as_signed(), b).as_unsigned() + _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i32" + link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" )] - fn _vshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8); } - _vshlq_u32(a.as_signed(), b).as_unsigned() + let mut b: int8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v1i64" + link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" )] - fn _vshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8); } - _vshl_u64(a.as_signed(), b).as_unsigned() + _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i64" + link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" )] - fn _vshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8); } - _vshlq_u64(a.as_signed(), b).as_unsigned() + let mut b: int16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 16); - simd_shl(simd_cast(a), vdupq_n_s32(N as _)) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" + )] + fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 32); - simd_shl(simd_cast(a), vdupq_n_s64(N as _)) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" + )] + fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); + } + let mut b: int16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { - static_assert!(N >= 0 && N <= 8); - simd_shl(simd_cast(a), vdupq_n_s16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" + )] + fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); + } + _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 16); - simd_shl(simd_cast(a), vdupq_n_u32(N as _)) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" + )] + fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); + } + let mut b: int32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 32); - simd_shl(simd_cast(a), vdupq_n_u64(N as _)) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" + )] + fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { - static_assert!(N >= 0 && N <= 8); - simd_shl(simd_cast(a), vdupq_n_u16(N as _)) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" + )] + fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); + } + let mut b: int32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -37947,24 +108169,25 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { 7 } else { N }; - simd_shr(a, vdup_n_s8(n as _)) +pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -37973,24 +108196,29 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { 7 } else { N }; - simd_shr(a, vdupq_n_s8(n as _)) +pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -37999,24 +108227,25 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { 15 } else { N }; - simd_shr(a, vdup_n_s16(n as _)) +pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + vst3_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38025,24 +108254,29 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { 15 } else { N }; - simd_shr(a, vdupq_n_s16(n as _)) +pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst3_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38051,24 +108285,25 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { 31 } else { N }; - simd_shr(a, vdup_n_s32(n as _)) +pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38077,24 +108312,29 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { 31 } else { N }; - simd_shr(a, vdupq_n_s32(n as _)) +pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38103,24 +108343,25 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { 63 } else { N }; - simd_shr(a, vdup_n_s64(n as _)) +pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + vst3_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38129,24 +108370,29 @@ pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { 63 } else { N }; - simd_shr(a, vdupq_n_s64(n as _)) +pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst3_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38155,28 +108401,25 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { - return vdup_n_u8(0); - } else { - N - }; - simd_shr(a, vdup_n_u8(n as _)) +pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + vst3q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38185,28 +108428,29 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { - return vdupq_n_u8(0); - } else { - N - }; - simd_shr(a, vdupq_n_u8(n as _)) +pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst3q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38215,28 +108459,25 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { - return vdup_n_u16(0); - } else { - N - }; - simd_shr(a, vdup_n_u16(n as _)) +pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38245,28 +108486,29 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { - return vdupq_n_u16(0); - } else { - N - }; - simd_shr(a, vdupq_n_u16(n as _)) +pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38275,28 +108517,25 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { - return vdup_n_u32(0); - } else { - N - }; - simd_shr(a, vdup_n_u32(n as _)) +pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + vst3_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38305,28 +108544,29 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { - return vdupq_n_u32(0); - } else { - N - }; - simd_shr(a, vdupq_n_u32(n as _)) +pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: poly16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst3_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38335,28 +108575,25 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { - return vdup_n_u64(0); - } else { - N - }; - simd_shr(a, vdup_n_u64(n as _)) +pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(st3, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38365,28 +108602,27 @@ pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { - return vdupq_n_u64(0); - } else { - N - }; - simd_shr(a, vdupq_n_u64(n as _)) +pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38395,23 +108631,59 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_cast(simd_shr(a, vdupq_n_s16(N as _))) +pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { + vst3_s64(transmute(a), transmute(b)) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v1i64.p0i8" + )] + fn _vst3_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i8); + } + _vst3_s64(b.0, b.1, b.2, a as _) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v1i64")] + fn _vst3_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, size: i32); + } + _vst3_s64(a as _, b.0, b.1, b.2, 8) +} + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38420,23 +108692,23 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_cast(simd_shr(a, vdupq_n_s32(N as _))) +pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { + vst3_s64(transmute(a), transmute(b)) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38445,23 +108717,23 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_cast(simd_shr(a, vdupq_n_s64(N as _))) +pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38470,23 +108742,27 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_cast(simd_shr(a, vdupq_n_u16(N as _))) +pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { + let mut b: uint8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38495,23 +108771,23 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_cast(simd_shr(a, vdupq_n_u32(N as _))) +pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { + vst3q_s8(transmute(a), transmute(b)) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38520,23 +108796,39 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_cast(simd_shr(a, vdupq_n_u64(N as _))) +pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { + let mut b: uint8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst3q_s8(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38545,23 +108837,23 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshr_n_s8::(b)) +pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { + vst3_s16(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38570,23 +108862,27 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshrq_n_s8::(b)) +pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { + let mut b: uint16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst3_s16(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38595,23 +108891,23 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshr_n_s16::(b)) +pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { + vst3q_s16(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38620,23 +108916,27 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshrq_n_s16::(b)) +pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { + let mut b: uint16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3q_s16(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38645,23 +108945,23 @@ pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshr_n_s32::(b)) +pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { + vst3_s32(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38670,23 +108970,27 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshrq_n_s32::(b)) +pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { + let mut b: uint32x2x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + vst3_s32(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38695,23 +108999,23 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshr_n_s64::(b)) +pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { + vst3q_s32(transmute(a), transmute(b)) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38720,23 +109024,27 @@ pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshrq_n_s64::(b)) +pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { + let mut b: uint32x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst3q_s32(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38745,23 +109053,23 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshr_n_u8::(b)) +pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38770,23 +109078,27 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshrq_n_u8::(b)) +pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { + let mut b: poly8x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38795,23 +109107,23 @@ pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshr_n_u16::(b)) +pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { + vst3q_s8(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38820,23 +109132,39 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshrq_n_u16::(b)) +pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { + let mut b: poly8x16x3_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst3q_s8(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38845,23 +109173,23 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshr_n_u32::(b)) +pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { + vst3_s16(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38870,23 +109198,27 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshrq_n_u32::(b)) +pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { + let mut b: poly16x4x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + vst3_s16(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38895,23 +109227,23 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshr_n_u64::(b)) +pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { + vst3q_s16(transmute(a), transmute(b)) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] + +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(st3) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -38920,1808 +109252,2418 @@ pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshrq_n_u64::(b)) +pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { + let mut b: poly16x8x3_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + vst3q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v2f32")] - fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] + fn _vst4_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + size: i32, + ); } - _vst1_f32_x2(a, b.0, b.1) + _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v4f32")] - fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] + fn _vst4_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + size: i32, + ); } - _vst1q_f32_x2(a, b.0, b.1) + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" - )] - fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32); +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] + fn _vst4q_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + size: i32, + ); } - _vst1_f32_x2(b.0, b.1, a) + _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" - )] - fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32); +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] + fn _vst4q_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + size: i32, + ); } - _vst1q_f32_x2(b.0, b.1, a) + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v2f32")] - fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] + fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); } - _vst1_f32_x3(a, b.0, b.1, b.2) + _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v4f32")] - fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] + fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); } - _vst1q_f32_x3(a, b.0, b.1, b.2) + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" - )] - fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32); +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] + fn _vst4q_s8( + ptr: *mut i8, + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + size: i32, + ); } - _vst1_f32_x3(b.0, b.1, b.2, a) + _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" - )] - fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32); +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] + fn _vst4q_s8( + ptr: *mut i8, + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + size: i32, + ); } - _vst1q_f32_x3(b.0, b.1, b.2, a) + let mut b: int8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32")] - fn _vst1_f32_x4( - ptr: *mut f32, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] + fn _vst4_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + size: i32, ); } - _vst1_f32_x4(a, b.0, b.1, b.2, b.3) + _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32")] - fn _vst1q_f32_x4( - ptr: *mut f32, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] + fn _vst4_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + size: i32, ); } - _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" - )] - fn _vst1_f32_x4( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - ptr: *mut f32, +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] + fn _vst4q_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + size: i32, ); } - _vst1_f32_x4(b.0, b.1, b.2, b.3, a) + _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" - )] - fn _vst1q_f32_x4( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - ptr: *mut f32, +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] + fn _vst4q_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + size: i32, ); } - _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] + fn _vst4_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + size: i32, + ); + } + _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] + fn _vst4_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + size: i32, + ); + } + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) +} + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] + fn _vst4q_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + size: i32, + ); + } + _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) +} + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] + fn _vst4q_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + size: i32, + ); + } + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) +} + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" + )] + fn _vst4_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut i8); + } + _vst4_f32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" + )] + fn _vst4_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut i8); + } + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_f32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" + )] + fn _vst4q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut i8); + } + _vst4q_f32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" + )] + fn _vst4q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut i8); + } + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_f32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" + )] + fn _vst4_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); + } + _vst4_s8(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" + )] + fn _vst4_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); + } + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4_s8(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" + )] + fn _vst4q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); + } + _vst4q_s8(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" + )] + fn _vst4q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); + } + let mut b: int8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + _vst4q_s8(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" + )] + fn _vst4_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i8); + } + _vst4_s16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" + )] + fn _vst4_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i8); + } + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4_s16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" + )] + fn _vst4q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i8); + } + _vst4q_s16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" + )] + fn _vst4q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i8); + } + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4q_s16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" + )] + fn _vst4_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i8); + } + _vst4_s32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" + )] + fn _vst4_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i8); + } + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_s32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" + )] + fn _vst4q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i8); + } + _vst4q_s32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" + )] + fn _vst4q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i8); + } + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_s32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] + fn _vst4_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ); + } + _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] + fn _vst4_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ); + } + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] + fn _vst4q_lane_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ); + } + _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] + fn _vst4q_lane_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ); + } + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] + fn _vst4_lane_s8( + ptr: *mut i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ); + } + _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] + fn _vst4_lane_s8( + ptr: *mut i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ); + } + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { - vst1_s64_x2(transmute(a), transmute(b)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] + fn _vst4_lane_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ); + } + _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { - vst1_s64_x3(transmute(a), transmute(b)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] + fn _vst4_lane_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ); + } + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { - vst1_s64_x4(transmute(a), transmute(b)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] + fn _vst4q_lane_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ); + } + _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { - vst1q_s64_x2(transmute(a), transmute(b)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] + fn _vst4q_lane_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ); + } + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { - vst1q_s64_x3(transmute(a), transmute(b)) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] + fn _vst4_lane_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ); + } + _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { - vst1q_s64_x4(transmute(a), transmute(b)) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] + fn _vst4_lane_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ); + } + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" - )] - fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8); +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] + fn _vst4q_lane_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ); } - _vst1_s8_x2(b.0, b.1, a) + _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" - )] - fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8); +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] + fn _vst4q_lane_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ); } - _vst1q_s8_x2(b.0, b.1, a) + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" + link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" )] - fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16); + fn _vst4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *mut i8, + ); } - _vst1_s16_x2(b.0, b.1, a) + _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" + link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" )] - fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16); + fn _vst4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *mut i8, + ); } - _vst1q_s16_x2(b.0, b.1, a) + let mut b: float32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" + link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" )] - fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32); + fn _vst4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *mut i8, + ); } - _vst1_s32_x2(b.0, b.1, a) + _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" + link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" )] - fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32); + fn _vst4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *mut i8, + ); } - _vst1q_s32_x2(b.0, b.1, a) + let mut b: float32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v1i64.p0i64" + link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" )] - fn _vst1_s64_x2(a: int64x1_t, b: int64x1_t, ptr: *mut i64); + fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8); } - _vst1_s64_x2(b.0, b.1, a) + _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" + link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" )] - fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64); - } - _vst1q_s64_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v8i8")] - fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); - } - _vst1_s8_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v16i8")] - fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); - } - _vst1q_s8_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v4i16")] - fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); - } - _vst1_s16_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v8i16")] - fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); - } - _vst1q_s16_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v2i32")] - fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); - } - _vst1_s32_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v4i32")] - fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); - } - _vst1q_s32_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v1i64")] - fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t); - } - _vst1_s64_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v2i64")] - fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); + fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8); } - _vst1q_s64_x2(a, b.0, b.1) + let mut b: int8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" + link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" )] - fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + fn _vst4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i64, + ptr: *mut i8, + ); } - _vst1_s8_x3(b.0, b.1, b.2, a) + _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" + link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" )] - fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + fn _vst4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i64, + ptr: *mut i8, + ); } - _vst1q_s8_x3(b.0, b.1, b.2, a) + let mut b: int16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" + link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" )] - fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16); + fn _vst4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *mut i8, + ); } - _vst1_s16_x3(b.0, b.1, b.2, a) + _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" + link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" )] - fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16); + fn _vst4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *mut i8, + ); } - _vst1q_s16_x3(b.0, b.1, b.2, a) + let mut b: int16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" + link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" )] - fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32); + fn _vst4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *mut i8, + ); } - _vst1_s32_x3(b.0, b.1, b.2, a) + _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" + link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" )] - fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32); + fn _vst4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *mut i8, + ); } - _vst1q_s32_x3(b.0, b.1, b.2, a) + let mut b: int32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v1i64.p0i64" + link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" )] - fn _vst1_s64_x3(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i64); + fn _vst4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *mut i8, + ); } - _vst1_s64_x3(b.0, b.1, b.2, a) + _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { +pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" + link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" )] - fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64); + fn _vst4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *mut i8, + ); } - _vst1q_s64_x3(b.0, b.1, b.2, a) + let mut b: int32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8")] - fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); - } - _vst1_s8_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8")] - fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); - } - _vst1q_s8_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16")] - fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); - } - _vst1_s16_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + vst4_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16")] - fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); - } - _vst1q_s16_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst4_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32")] - fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); - } - _vst1_s32_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32")] - fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); - } - _vst1q_s32_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: uint16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64")] - fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); - } - _vst1_s64_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + vst4_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64")] - fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); - } - _vst1q_s64_x3(a, b.0, b.1, b.2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + let mut b: uint32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst4_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" - )] - fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); - } - _vst1_s8_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + vst4q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" - )] - fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); - } - _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: uint32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst4q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" - )] - fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16); - } - _vst1_s16_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" - )] - fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16); - } - _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" - )] - fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32); - } - _vst1_s32_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + vst4_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" - )] - fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32); - } - _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + let mut b: poly16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst4_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v1i64.p0i64" - )] - fn _vst1_s64_x4(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i64); - } - _vst1_s64_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" - )] - fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64); - } - _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + let mut b: poly16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8")] - fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); - } - _vst1_s8_x4(a, b.0, b.1, b.2, b.3) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { + vst4_s64(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8")] - fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v1i64")] + fn _vst4_s64( + ptr: *mut i8, + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + d: int64x1_t, + size: i32, + ); } - _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) + _vst4_s64(a as _, b.0, b.1, b.2, b.3, 8) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16")] - fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v1i64.p0i8" + )] + fn _vst4_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i8); } - _vst1_s16_x4(a, b.0, b.1, b.2, b.3) + _vst4_s64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16")] - fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); - } - _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { + vst4_s64(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32")] - fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); - } - _vst1_s32_x4(a, b.0, b.1, b.2, b.3) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { + vst4_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32")] - fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); - } - _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { + let mut b: uint8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64")] - fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); - } - _vst1_s64_x4(a, b.0, b.1, b.2, b.3) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { + vst4q_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64")] - fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); - } - _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { + let mut b: uint8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst4q_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40731,20 +111673,22 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { - vst1_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { + vst4_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40754,20 +111698,27 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { - vst1_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { + let mut b: uint16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst4_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40777,20 +111728,22 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { - vst1_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { + vst4q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40800,20 +111753,27 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { - vst1q_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { + let mut b: uint16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40823,20 +111783,22 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { - vst1q_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { + vst4_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40846,20 +111808,27 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { - vst1q_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { + let mut b: uint32x2x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1]); + vst4_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40869,20 +111838,22 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { - vst1_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { + vst4q_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40892,20 +111863,27 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { - vst1_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { + let mut b: uint32x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst4q_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40915,20 +111893,22 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { - vst1_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { + vst4_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40938,20 +111918,27 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { - vst1q_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { + let mut b: poly8x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40961,20 +111948,22 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { - vst1q_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { + vst4q_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -40984,20 +111973,43 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { - vst1q_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { + let mut b: poly8x16x4_t = b; + b.0 = simd_shuffle!( + b.0, + b.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + vst4q_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -41007,20 +112019,22 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { - vst1_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { + vst4_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -41030,20 +112044,27 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { - vst1_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { + let mut b: poly16x4x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); + vst4_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -41053,20 +112074,22 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { - vst1_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { + vst4q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] + +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -41076,20 +112099,52 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { - vst1q_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { + let mut b: poly16x8x4_t = b; + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); + vst4q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_sub(a, b) +} + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(fsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41099,20 +112154,25 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { - vst1q_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: float32x2_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(fsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41122,20 +112182,22 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { - vst1q_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(fsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41145,20 +112207,25 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { - vst1_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: float32x4_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41168,20 +112235,22 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { - vst1_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41191,20 +112260,25 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { - vst1_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int16x4_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41214,20 +112288,22 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { - vst1q_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41237,20 +112313,25 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { - vst1q_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41260,20 +112341,22 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { - vst1q_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41283,20 +112366,25 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { - vst1_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint16x4_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41306,20 +112394,22 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { - vst1_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41329,20 +112419,25 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { - vst1_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41352,20 +112447,22 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { - vst1q_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41375,20 +112472,25 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { - vst1q_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int32x2_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41398,20 +112500,22 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { - vst1q_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41421,20 +112525,25 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { - vst1_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41444,20 +112553,22 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { - vst1_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41467,20 +112578,25 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { - vst1_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint32x2_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41490,20 +112606,22 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { - vst1q_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41513,20 +112631,24 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { - vst1q_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -41536,22 +112658,23 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { - vst1q_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_sub(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(sub) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -41560,576 +112683,435 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" - )] - fn _vst2_f32(a: float32x2_t, b: float32x2_t, ptr: *mut i8); - } - _vst2_f32(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" - )] - fn _vst2q_f32(a: float32x4_t, b: float32x4_t, ptr: *mut i8); - } - _vst2q_f32(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" - )] - fn _vst2_s8(a: int8x8_t, b: int8x8_t, ptr: *mut i8); - } - _vst2_s8(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" - )] - fn _vst2q_s8(a: int8x16_t, b: int8x16_t, ptr: *mut i8); - } - _vst2q_s8(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" - )] - fn _vst2_s16(a: int16x4_t, b: int16x4_t, ptr: *mut i8); - } - _vst2_s16(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" - )] - fn _vst2q_s16(a: int16x8_t, b: int16x8_t, ptr: *mut i8); - } - _vst2q_s16(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" - )] - fn _vst2_s32(a: int32x2_t, b: int32x2_t, ptr: *mut i8); - } - _vst2_s32(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" - )] - fn _vst2q_s32(a: int32x4_t, b: int32x4_t, ptr: *mut i8); - } - _vst2q_s32(b.0, b.1, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2f32")] - fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); - } - _vst2_f32(a as _, b.0, b.1, 4) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4f32")] - fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); - } - _vst2q_f32(a as _, b.0, b.1, 4) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i8")] - fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); - } - _vst2_s8(a as _, b.0, b.1, 1) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v16i8")] - fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); - } - _vst2q_s8(a as _, b.0, b.1, 1) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i16")] - fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); - } - _vst2_s16(a as _, b.0, b.1, 2) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i16")] - fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); - } - _vst2q_s16(a as _, b.0, b.1, 2) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2i32")] - fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); - } - _vst2_s32(a as _, b.0, b.1, 4) +pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_sub(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i32")] - fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); - } - _vst2q_s32(a as _, b.0, b.1, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" - )] - fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); - } - _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_sub(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" - )] - fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_sub(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" - )] - fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); - } - _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" - )] - fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); - } - _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_sub(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" - )] - fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" - )] - fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); - } - _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_sub(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" - )] - fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int8x16_t = simd_sub(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")] - fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); - } - _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_sub(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")] - fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); - } - _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint8x8_t = simd_sub(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")] - fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); - } - _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_sub(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] + +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")] - fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); - } - _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: uint8x16_t = simd_sub(a, b); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")] - fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); - } - _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(subhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let d: int8x8_t = vsubhn_s16(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")] - fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); - } - _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(subhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int8x8_t = vsubhn_s16(b, c); + let ret_val: int8x16_t = + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")] - fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); - } - _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(subhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let d: int16x4_t = vsubhn_s32(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42138,23 +113120,28 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let d: int16x4_t = vsubhn_s32(b, c); + let ret_val: int16x8_t = simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42163,23 +113150,24 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let d: int32x2_t = vsubhn_s64(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42188,23 +113176,28 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); + let d: int32x2_t = vsubhn_s64(b, c); + let ret_val: int32x4_t = simd_shuffle!(a, d, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42213,23 +113206,24 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - vst2_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let d: uint8x8_t = vsubhn_u16(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42238,23 +113232,33 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2q_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: uint8x8_t = vsubhn_u16(b, c); + let ret_val: uint8x16_t = + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42263,23 +113267,24 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let d: uint16x4_t = vsubhn_u32(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42288,23 +113293,28 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); + let d: uint16x4_t = vsubhn_u32(b, c); + let ret_val: uint16x8_t = simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -42313,21 +113323,23 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let d: uint32x2_t = vsubhn_u64(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(subhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -42337,55 +113349,27 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { - vst2_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v1i64")] - fn _vst2_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32); - } - _vst2_s64(a as _, b.0, b.1, 8) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v1i64.p0i8" - )] - fn _vst2_s64(a: int64x1_t, b: int64x1_t, ptr: *mut i8); - } - _vst2_s64(b.0, b.1, a as _) +pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); + let d: uint32x2_t = vsubhn_u64(b, c); + let ret_val: uint32x4_t = simd_shuffle!(a, d, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42395,20 +113379,23 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { - vst2_s64(transmute(a), transmute(b)) +pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42418,20 +113405,26 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { - vst2_s8(transmute(a), transmute(b)) +pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + let ret_val: int8x8_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42441,20 +113434,23 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { - vst2q_s8(transmute(a), transmute(b)) +pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + let c: i32x4 = i32x4::new(16, 16, 16, 16); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42464,20 +113460,26 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { - vst2_s16(transmute(a), transmute(b)) +pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: i32x4 = i32x4::new(16, 16, 16, 16); + let ret_val: int16x4_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42487,20 +113489,23 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { - vst2q_s16(transmute(a), transmute(b)) +pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + let c: i64x2 = i64x2::new(32, 32); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42510,20 +113515,26 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { - vst2_s32(transmute(a), transmute(b)) +pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: i64x2 = i64x2::new(32, 32); + let ret_val: int32x2_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42533,20 +113544,23 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { - vst2q_s32(transmute(a), transmute(b)) +pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42556,20 +113570,26 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { - vst2_s8(transmute(a), transmute(b)) +pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + let ret_val: uint8x8_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42579,20 +113599,23 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { - vst2q_s8(transmute(a), transmute(b)) +pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let c: u32x4 = u32x4::new(16, 16, 16, 16); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42602,20 +113625,26 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { - vst2_s16(transmute(a), transmute(b)) +pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: u32x4 = u32x4::new(16, 16, 16, 16); + let ret_val: uint16x4_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -42625,617 +113654,526 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { - vst2q_s16(transmute(a), transmute(b)) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] - fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); - } - _vst3_f32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] - fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); - } - _vst3q_f32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] - fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); - } - _vst3_s8(a as _, b.0, b.1, b.2, 1) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] - fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); - } - _vst3q_s8(a as _, b.0, b.1, b.2, 1) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] - fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); - } - _vst3_s16(a as _, b.0, b.1, b.2, 2) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] - fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); - } - _vst3q_s16(a as _, b.0, b.1, b.2, 2) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] - fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); - } - _vst3_s32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] - fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); - } - _vst3q_s32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" - )] - fn _vst3_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8); - } - _vst3_f32(b.0, b.1, b.2, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" - )] - fn _vst3q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8); - } - _vst3q_f32(b.0, b.1, b.2, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" - )] - fn _vst3_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); - } - _vst3_s8(b.0, b.1, b.2, a as _) +pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let c: u64x2 = u64x2::new(32, 32); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] + +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" - )] - fn _vst3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); - } - _vst3q_s8(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(subhn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); + let c: u64x2 = u64x2::new(32, 32); + let ret_val: uint32x2_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" - )] - fn _vst3_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8); - } - _vst3_s16(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + let c: int16x8_t = simd_cast(a); + let d: int16x8_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" - )] - fn _vst3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8); - } - _vst3q_s16(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_cast(a); + let d: int16x8_t = simd_cast(b); + let ret_val: int16x8_t = simd_sub(c, d); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" - )] - fn _vst3_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8); - } - _vst3_s32(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + let c: int32x4_t = simd_cast(a); + let d: int32x4_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" - )] - fn _vst3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8); - } - _vst3q_s32(b.0, b.1, b.2, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] - fn _vst3_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ); - } - _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_cast(a); + let d: int32x4_t = simd_cast(b); + let ret_val: int32x4_t = simd_sub(c, d); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] - fn _vst3q_lane_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ); - } - _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + let c: int64x2_t = simd_cast(a); + let d: int64x2_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] + +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] - fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); - } - _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int64x2_t = simd_cast(a); + let d: int64x2_t = simd_cast(b); + let ret_val: int64x2_t = simd_sub(c, d); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] - fn _vst3_lane_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ); - } - _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + let c: uint16x8_t = simd_cast(a); + let d: uint16x8_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] - fn _vst3q_lane_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ); - } - _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_cast(a); + let d: uint16x8_t = simd_cast(b); + let ret_val: uint16x8_t = simd_sub(c, d); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] - fn _vst3_lane_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, - ); - } - _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + let c: uint32x4_t = simd_cast(a); + let d: uint32x4_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] - fn _vst3q_lane_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ); - } - _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_cast(a); + let d: uint32x4_t = simd_cast(b); + let ret_val: uint32x4_t = simd_sub(c, d); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" - )] - fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8); - } - _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + let c: uint64x2_t = simd_cast(a); + let d: uint64x2_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] + +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" - )] - fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint64x2_t = simd_cast(a); + let d: uint64x2_t = simd_cast(b); + let ret_val: uint64x2_t = simd_sub(c, d); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" - )] - fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8); - } - _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubw) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" - )] - fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8); - } - _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubw) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int16x8_t = simd_sub(a, simd_cast(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" - )] - fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubw) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" - )] - fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); - } - _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubw) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: int32x4_t = simd_sub(a, simd_cast(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" - )] - fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubw) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] + +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(ssubw) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -43244,23 +114182,26 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: int64x2_t = simd_sub(a, simd_cast(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(usubw) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -43269,23 +114210,23 @@ pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(usubw) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -43294,23 +114235,26 @@ pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: uint16x8_t = simd_sub(a, simd_cast(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(usubw) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -43319,23 +114263,23 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - vst3_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(usubw) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -43344,23 +114288,26 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3q_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let ret_val: uint32x4_t = simd_sub(a, simd_cast(b)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(usubw) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -43369,23 +114316,23 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] + +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(usubw) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -43394,1059 +114341,1789 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: uint64x2_t = simd_sub(a, simd_cast(b)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(sudot, LANE = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vsudot_lane_s32( + a: int32x2_t, + b: int8x8_t, + c: uint8x8_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, transmute(c), b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"] + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sudot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { - vst3_s64(transmute(a), transmute(b)) +pub unsafe fn vsudot_lane_s32( + a: int32x2_t, + b: int8x8_t, + c: uint8x8_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint32x2_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vusdot_s32(a, transmute(c), b); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sudot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsudotq_lane_s32( + a: int32x4_t, + b: int8x16_t, + c: uint8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, transmute(c), b) +} + +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sudot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsudotq_lane_s32( + a: int32x4_t, + b: int8x16_t, + c: uint8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint32x2_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vusdotq_s32(a, transmute(c), b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] + fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vtbl1(a, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] + fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbl1(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + vtbl1(a, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vtbl1(a, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl1(transmute(a), transmute(b))) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl1(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl1(transmute(a), transmute(b))) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl1(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] + fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + _vtbl2(a, b, c) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] + fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbl2(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vtbl2(a.0, a.1, b) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + let mut a: int8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); + a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vtbl2(a.0, a.1, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] + fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + } + _vtbl3(a, b, c, d) +} + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] + fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbl3(a, b, c, d); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v1i64.p0i8" - )] - fn _vst3_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i8); - } - _vst3_s64(b.0, b.1, b.2, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + vtbl3(a.0, a.1, a.2, b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v1i64")] - fn _vst3_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, size: i32); - } - _vst3_s64(a as _, b.0, b.1, b.2, 8) +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + let mut a: int8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); + a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); + a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vtbl3(a.0, a.1, a.2, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { - vst3_s64(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { - vst3_s8(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { - vst3q_s8(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { - vst3_s16(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { - vst3q_s16(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] + fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + } + _vtbl4(a, b, c, d, e) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { - vst3_s32(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] + fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); + let e: int8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbl4(a, b, c, d, e); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { - vst3q_s32(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + vtbl4(a.0, a.1, a.2, a.3, b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { - vst3_s8(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + let mut a: int8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); + a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); + a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); + a.3 = simd_shuffle!(a.3, a.3, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vtbl4(a.0, a.1, a.2, a.3, b); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { - vst3q_s8(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { - vst3_s16(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { - vst3q_s16(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] + +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] - fn _vst4_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - size: i32, - ); +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] + fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } - _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) + _vtbx1(a, b, c) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] + fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbx1(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] - fn _vst4q_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - size: i32, - ); - } - _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + vtbx1(a, b, c) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vtbx1(a, b, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] - fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); - } - _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] - fn _vst4q_s8( - ptr: *mut i8, - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - size: i32, - ); - } - _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] - fn _vst4_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - size: i32, - ); +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] + fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; } - _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) + _vtbx2(a, b, c, d) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] + fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbx2(a, b, c, d); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] - fn _vst4q_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - size: i32, - ); - } - _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + vtbx2(a, b.0, b.1, c) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x2_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vtbx2(a, b.0, b.1, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] - fn _vst4_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - size: i32, - ); - } - _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x2_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] - fn _vst4q_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - size: i32, - ); - } - _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" - )] - fn _vst4_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut i8); - } - _vst4_f32(b.0, b.1, b.2, b.3, a as _) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x2_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" - )] - fn _vst4q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut i8); +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] + fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; } - _vst4q_f32(b.0, b.1, b.2, b.3, a as _) + _vtbx3(a, b, c, d, e) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" - )] - fn _vst4_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); - } - _vst4_s8(b.0, b.1, b.2, b.3, a as _) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] + fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); + let e: int8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbx3(a, b, c, d, e); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" - )] - fn _vst4q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); - } - _vst4q_s8(b.0, b.1, b.2, b.3, a as _) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + vtbx3(a, b.0, b.1, b.2, c) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" - )] - fn _vst4_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i8); - } - _vst4_s16(b.0, b.1, b.2, b.3, a as _) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x3_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); + b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); + b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = vtbx3(a, b.0, b.1, b.2, c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" - )] - fn _vst4q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i8); - } - _vst4q_s16(b.0, b.1, b.2, b.3, a as _) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" - )] - fn _vst4_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i8); - } - _vst4_s32(b.0, b.1, b.2, b.3, a as _) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x3_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" - )] - fn _vst4q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i8); - } - _vst4q_s32(b.0, b.1, b.2, b.3, a as _) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] - fn _vst4_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ); - } - _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x3_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] - fn _vst4q_lane_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ); +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx4( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + e: int8x8_t, + f: int8x8_t, +) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] + fn _vtbx4( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + e: int8x8_t, + f: int8x8_t, + ) -> int8x8_t; } - _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vtbx4(a, b, c, d, e, f) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] - fn _vst4_lane_s8( - ptr: *mut i8, +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx4( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + e: int8x8_t, + f: int8x8_t, +) -> int8x8_t { + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] + fn _vtbx4( a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, - n: i32, - size: i32, - ); - } - _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) + e: int8x8_t, + f: int8x8_t, + ) -> int8x8_t; + } + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); + let e: int8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); + let f: int8x8_t = simd_shuffle!(f, f, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int8x8_t = _vtbx4(a, b, c, d, e, f); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] - fn _vst4_lane_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ); - } - _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + vtbx4( + a, + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + ) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] - fn _vst4q_lane_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ); - } - _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x4_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vtbx4( + a, + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] - fn _vst4_lane_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ); - } - _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x4_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )) +} + +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] - fn _vst4q_lane_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ); - } - _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x4_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" - )] - fn _vst4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" - )] - fn _vst4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: float32x2x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" - )] - fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8); - } - _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" - )] - fn _vst4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: int32x2x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" - )] - fn _vst4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a1, b1)) +} + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: uint32x2x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val +} + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) +} + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: float32x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" - )] - fn _vst4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" - )] - fn _vst4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: int8x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44455,23 +116132,33 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a1: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: int8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44480,23 +116167,46 @@ pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - vst4_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a1: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: int8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + let mut ret_val: int8x16x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44505,23 +116215,25 @@ pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44530,23 +116242,30 @@ pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - vst4_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: int16x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44555,23 +116274,25 @@ pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - vst4q_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44580,23 +116301,30 @@ pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: int16x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44605,23 +116333,25 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - vst4_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(trn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -44630,21 +116360,29 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: int32x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44654,62 +116392,56 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { - vst4_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v1i64")] - fn _vst4_s64( - ptr: *mut i8, - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - d: int64x1_t, - size: i32, - ); - } - _vst4_s64(a as _, b.0, b.1, b.2, b.3, 8) +pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v1i64.p0i8" - )] - fn _vst4_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i8); - } - _vst4_s64(b.0, b.1, b.2, b.3, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: uint8x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44719,20 +116451,32 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { - vst4_s64(transmute(a), transmute(b)) +pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a1: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: uint8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44742,20 +116486,45 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { - vst4_s8(transmute(a), transmute(b)) +pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a1: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: uint8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + let mut ret_val: uint8x16x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44765,20 +116534,24 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { - vst4q_s8(transmute(a), transmute(b)) +pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44788,20 +116561,29 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { - vst4_s16(transmute(a), transmute(b)) +pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: uint16x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44811,20 +116593,24 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { - vst4q_s16(transmute(a), transmute(b)) +pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44834,20 +116620,29 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { - vst4_s32(transmute(a), transmute(b)) +pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: uint16x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44857,20 +116652,24 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { - vst4q_s32(transmute(a), transmute(b)) +pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44880,20 +116679,29 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { - vst4_s8(transmute(a), transmute(b)) +pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: uint32x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44903,20 +116711,24 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { - vst4q_s8(transmute(a), transmute(b)) +pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44926,20 +116738,29 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { - vst4_s16(transmute(a), transmute(b)) +pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: poly8x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44949,20 +116770,32 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { - vst4q_s16(transmute(a), transmute(b)) +pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a1: poly8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: poly8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + transmute((a1, b1)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44972,20 +116805,45 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_sub(a, b) +pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a1: poly8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: poly8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + let mut ret_val: poly8x16x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -44995,20 +116853,24 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_sub(a, b) +pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -45018,20 +116880,29 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_sub(a, b) +pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: poly16x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -45041,20 +116912,24 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_sub(a, b) +pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] + +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -45064,20 +116939,29 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_sub(a, b) +pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: poly16x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45087,20 +116971,24 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_sub(a, b) +pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let c: int8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45110,20 +116998,27 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_sub(a, b) +pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45133,20 +117028,24 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_sub(a, b) +pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let c: int8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45156,20 +117055,31 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_sub(a, b) +pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_ne(c, transmute(d)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45179,20 +117089,24 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_sub(a, b) +pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let c: int16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45202,20 +117116,27 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_sub(a, b) +pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45225,20 +117146,24 @@ pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_sub(a, b) +pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let c: int16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45248,20 +117173,27 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_sub(a, b) +pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45271,20 +117203,24 @@ pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_sub(a, b) +pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let c: int32x2_t = simd_and(a, b); + let d: i32x2 = i32x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45294,20 +117230,27 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_sub(a, b) +pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: int32x2_t = simd_and(a, b); + let d: i32x2 = i32x2::new(0, 0); + let ret_val: uint32x2_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45317,20 +117260,24 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_sub(a, b) +pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let c: int32x4_t = simd_and(a, b); + let d: i32x4 = i32x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45340,20 +117287,27 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_sub(a, b) +pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: int32x4_t = simd_and(a, b); + let d: i32x4 = i32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45363,20 +117317,24 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_sub(a, b) +pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + let c: poly8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45386,21 +117344,27 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let d: int8x8_t = vsubhn_s16(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: poly8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45410,21 +117374,24 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let d: int16x4_t = vsubhn_s32(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + let c: poly8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45434,21 +117401,31 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let d: int32x2_t = vsubhn_s64(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3]) +pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: poly8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_ne(c, transmute(d)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45458,21 +117435,24 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let d: uint8x8_t = vsubhn_u16(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { + let c: poly16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45482,21 +117462,27 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let d: uint16x4_t = vsubhn_u32(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: poly16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45506,21 +117492,24 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let d: uint32x2_t = vsubhn_u64(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3]) +pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { + let c: poly16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] + +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45530,21 +117519,27 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: poly16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45554,21 +117549,24 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - let c: i32x4 = i32x4::new(16, 16, 16, 16); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let c: uint8x8_t = simd_and(a, b); + let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45578,21 +117576,27 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - let c: i64x2 = i64x2::new(32, 32); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint8x8_t = simd_and(a, b); + let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x8_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45602,21 +117606,24 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let c: uint8x16_t = simd_and(a, b); + let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45626,21 +117633,31 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let c: u32x4 = u32x4::new(16, 16, 16, 16); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x16_t = simd_and(a, b); + let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint8x16_t = simd_ne(c, transmute(d)); + simd_shuffle!( + ret_val, + ret_val, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45650,21 +117667,24 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let c: u64x2 = u64x2::new(32, 32); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let c: uint16x4_t = simd_and(a, b); + let d: u16x4 = u16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45674,22 +117694,27 @@ pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - let c: int16x8_t = simd_cast(a); - let d: int16x8_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint16x4_t = simd_and(a, b); + let d: u16x4 = u16x4::new(0, 0, 0, 0); + let ret_val: uint16x4_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45699,22 +117724,24 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - let c: int32x4_t = simd_cast(a); - let d: int32x4_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let c: uint16x8_t = simd_and(a, b); + let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45724,22 +117751,27 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - let c: int64x2_t = simd_cast(a); - let d: int64x2_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: uint16x8_t = simd_and(a, b); + let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + let ret_val: uint16x8_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45749,22 +117781,24 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - let c: uint16x8_t = simd_cast(a); - let d: uint16x8_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let c: uint32x2_t = simd_and(a, b); + let d: u32x2 = u32x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45774,22 +117808,27 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - let c: uint32x4_t = simd_cast(a); - let d: uint32x4_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let c: uint32x2_t = simd_and(a, b); + let d: u32x2 = u32x2::new(0, 0); + let ret_val: uint32x2_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45799,22 +117838,24 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - let c: uint64x2_t = simd_cast(a); - let d: uint64x2_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let c: uint32x4_t = simd_and(a, b); + let d: u32x4 = u32x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] + +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -45824,137 +117865,201 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let c: uint32x4_t = simd_and(a, b); + let d: u32x4 = u32x4::new(0, 0, 0, 0); + let ret_val: uint32x4_t = simd_ne(c, transmute(d)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(usdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vusdot_lane_s32( + a: int32x2_t, + b: uint8x8_t, + c: int8x8_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, b, transmute(c)) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(usdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vusdot_lane_s32( + a: int32x2_t, + b: uint8x8_t, + c: int8x8_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int32x2_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(usdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vusdotq_lane_s32( + a: int32x4_t, + b: uint8x16_t, + c: int8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, b, transmute(c)) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] + +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(usdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vusdotq_lane_s32( + a: int32x4_t, + b: uint8x16_t, + c: int8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int32x2_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + let ret_val: int32x4_t = vusdotq_s32(a, b, transmute(c)); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] + +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(usdot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] + fn _vusdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + } + _vusdot_s32(a, b.as_signed(), c) } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] + +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sudot, LANE = 0) + assert_instr(usdot) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223") @@ -45963,29 +118068,35 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsudot_lane_s32( - a: int32x2_t, - b: int8x8_t, - c: uint8x8_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, transmute(c), b) +pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] + fn _vusdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + } + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); + let ret_val: int32x2_t = _vusdot_s32(a, b.as_signed(), c); + simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] + +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sudot, LANE = 0) + assert_instr(usdot) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223") @@ -45994,102 +118105,137 @@ pub unsafe fn vsudot_lane_s32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsudotq_lane_s32( - a: int32x4_t, - b: int8x16_t, - c: uint8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, transmute(c), b) +pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] + fn _vusdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vusdotq_s32(a, b.as_signed(), c) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] + +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(usdot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) +pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] + fn _vusdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int32x4_t = _vusdotq_s32(a, b.as_signed(), c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] + +#[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(usmmla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) +pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] + fn _vusmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vusmmlaq_s32(a, b.as_signed(), c) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] + +#[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(usmmla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) +pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] + fn _vusmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let ret_val: int32x4_t = _vusmmlaq_s32(a, b.as_signed(), c); + simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -46099,22 +118245,24 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -46124,22 +118272,29 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: float32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -46149,30 +118304,24 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a1: int8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: int8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) +pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -46182,22 +118331,29 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: int32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -46207,22 +118363,24 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -46232,22 +118390,29 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: uint32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46257,22 +118422,24 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46282,30 +118449,29 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a1: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: uint8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) +pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: float32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46315,22 +118481,24 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46340,22 +118508,29 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: int8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46365,22 +118540,32 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + transmute((a0, b0)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46390,22 +118575,45 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + let mut ret_val: int8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46415,30 +118623,24 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a1: poly8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: poly8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) +pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46448,22 +118650,29 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: int16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46473,22 +118682,24 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46498,22 +118709,29 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let c: int8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: int16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46523,22 +118741,24 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let c: int8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46548,22 +118768,29 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let c: int16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: int32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46573,22 +118800,24 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let c: int16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46598,22 +118827,29 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let c: int32x2_t = simd_and(a, b); - let d: i32x2 = i32x2::new(0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: uint8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46623,22 +118859,32 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let c: int32x4_t = simd_and(a, b); - let d: i32x4 = i32x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + transmute((a0, b0)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46648,22 +118894,45 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { - let c: poly8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + let mut ret_val: uint8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46673,22 +118942,24 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { - let c: poly8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46698,22 +118969,29 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { - let c: poly16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: uint16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46723,22 +119001,24 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { - let c: poly16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46748,22 +119028,29 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let c: uint8x8_t = simd_and(a, b); - let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: uint16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46773,22 +119060,24 @@ pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let c: uint8x16_t = simd_and(a, b); - let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46798,22 +119087,29 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let c: uint16x4_t = simd_and(a, b); - let d: u16x4 = u16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: uint32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46823,22 +119119,24 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let c: uint16x8_t = simd_and(a, b); - let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46848,22 +119146,29 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let c: uint32x2_t = simd_and(a, b); - let d: u32x2 = u32x2::new(0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: poly8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -46873,146 +119178,166 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let c: uint32x4_t = simd_and(a, b); - let d: u32x4 = u32x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a0: poly8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: poly8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + transmute((a0, b0)) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) + assert_instr(uzp) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusdot_lane_s32( - a: int32x2_t, - b: uint8x8_t, - c: int8x8_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, b, transmute(c)) +pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a0: poly8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: poly8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + let mut ret_val: poly8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) + assert_instr(uzp) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusdotq_lane_s32( - a: int32x4_t, - b: uint8x16_t, - c: int8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, b, transmute(c)) +pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] - fn _vusdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - _vusdot_s32(a, b.as_signed(), c) +pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: poly16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] + +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] - fn _vusdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vusdotq_s32(a, b.as_signed(), c) +pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } + #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -47022,16 +119347,23 @@ pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) +pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: poly16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -47047,16 +119379,18 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); +pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -47072,22 +119406,29 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) +pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: float32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47097,22 +119438,24 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); +pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47122,22 +119465,29 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: int32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47147,30 +119497,24 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a0: int8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: int8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); +pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47180,22 +119524,29 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: uint32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47205,22 +119556,24 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); +pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47230,22 +119583,29 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: int8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47255,22 +119615,24 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); +pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47280,30 +119642,29 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a0: uint8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: uint8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - transmute((a0, b0)) +pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: int16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47313,22 +119674,24 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); +pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47338,22 +119701,29 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: uint8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47363,22 +119733,24 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); +pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47388,22 +119760,29 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: uint16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47413,30 +119792,24 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a0: poly8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: poly8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); +pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47446,22 +119819,29 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: poly8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -47471,19 +119851,21 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); +pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47496,19 +119878,26 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) +pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: poly16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47521,19 +119910,21 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); +pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47546,19 +119937,26 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) +pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: float32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47571,19 +119969,29 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47596,19 +120004,42 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) +pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + let mut ret_val: int8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47621,19 +120052,21 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47646,19 +120079,26 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) +pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: int16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47671,19 +120111,21 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -47696,16 +120138,23 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) +pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: int32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47721,16 +120170,26 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47746,24 +120205,39 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a0: int8x16_t = simd_shuffle!( +pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let a0: uint8x16_t = simd_shuffle!( a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ); - let b0: int8x16_t = simd_shuffle!( + let b0: uint8x16_t = simd_shuffle!( a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ); - transmute((a0, b0)) + let mut ret_val: uint8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47779,16 +120253,18 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47804,16 +120280,23 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) +pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: uint16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47829,24 +120312,18 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a0: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - let b0: uint8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); +pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47862,16 +120339,23 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - transmute((a0, b0)) +pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: uint32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); + ret_val } + #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47887,16 +120371,26 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a0: poly8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: poly8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); transmute((a0, b0)) } + #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47913,6 +120407,8 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); let a0: poly8x16_t = simd_shuffle!( a, b, @@ -47923,13 +120419,26 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ); - transmute((a0, b0)) + let mut ret_val: poly8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + ret_val } + #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -47950,3 +120459,35 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } + +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let a0: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: poly16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); + ret_val +} diff --git a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index 6f622b18be..3ce3e4fcb4 100644 --- a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -2,6 +2,8 @@ arch_cfgs: - arch_name: aarch64 target_feature: [neon] llvm_prefix: llvm.aarch64.neon +# Generate big endian shuffles +auto_big_endian: true # Repeatedly used anchors # #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -66,6 +68,9 @@ neon-unstable-i8mm: &neon-unstable-i8mm neon-unstable-fcma: &neon-unstable-fcma FnCall: [unstable, ['feature = "stdarch_neon_fcma"', 'issue = "117222"']] +aarch64-crc-stable: &aarch64-crc-stable + FnCall: [stable, ['feature = "stdarch_aarch64_crc32"', 'since = "1.80.0"']] + intrinsics: - name: "vaddd_{type}" doc: Add @@ -1841,10 +1846,11 @@ intrinsics: safety: unsafe: [neon] types: - - [poly64x2_t, ' static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] - - [float64x2_t, ' static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [poly64x2_t, ' static_assert_uimm_bits!(N, 1);', 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [float64x2_t, ' static_assert_uimm_bits!(N, 1);', 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] compose: - Identifier: ["{type[1]}", Symbol] + - Identifier: ["{type[2]}", Symbol] - name: "vmla{neon_type.no}" doc: "Floating-point multiply-add to accumulator" @@ -4631,6 +4637,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uaddlv.{type[2]}.{neon_type[0]}" arch: aarch64,arm64ec + - FnCall: ['_vaddlv{neon_type[0].no}', ['a.as_signed()']] - name: "vaddlv{neon_type[0].no}" doc: Unsigned Add Long across Vector @@ -4648,6 +4655,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uaddlv.{type[2]}.{neon_type[0]}" arch: aarch64,arm64ec + - FnCall: ['_vaddlv{neon_type[0].no}', ['a.as_signed()']] - name: "vsubw_high{neon_type[1].noq}" doc: Signed Subtract Wide @@ -7213,19 +7221,37 @@ intrinsics: - [poly8x8_t, poly8x8_t, poly8x8_t, '3', '3', ' match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - [poly16x4_t, poly16x4_t, poly16x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - [float32x2_t, float32x2_t, float32x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [int8x16_t, int8x8_t, int8x16_t, '4', '3', ' let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [int16x8_t, int16x4_t, int16x8_t, '3', '2', ' let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [int32x4_t, int32x2_t, int32x4_t, '2', '1', ' let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [uint8x16_t, uint8x8_t, uint8x16_t, '4', '3', ' let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [uint16x8_t, uint16x4_t, uint16x8_t, '3', '2', ' let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [uint32x4_t, uint32x2_t, uint32x4_t, '2', '1', ' let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [poly8x16_t, poly8x8_t, poly8x16_t, '4', '3', ' let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [poly16x8_t, poly16x4_t, poly16x8_t, '3', '2', ' let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, '{type[3]}']] - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] - Identifier: ["{type[5]}", Symbol] + - name: "vcopy{neon_type[0].lane_nox}" + doc: "Insert vector element from another vector element" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[2]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [mov, 'LANE1 = 0', 'LANE2 = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['1', '3']] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + static_defs: ['const LANE1: i32, const LANE2: i32'] + safety: + unsafe: [neon] + types: + - [int8x16_t, int8x8_t, int8x16_t, '4', '3', ' let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int16x8_t, int16x4_t, int16x8_t, '3', '2', ' let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int32x4_t, int32x2_t, int32x4_t, '2', '1', ' let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint8x16_t, uint8x8_t, uint8x16_t, '4', '3', ' let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint16x8_t, uint16x4_t, uint16x8_t, '3', '2', ' let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint32x4_t, uint32x2_t, uint32x4_t, '2', '1', ' let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly8x16_t, poly8x8_t, poly8x16_t, '4', '3', ' let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly16x8_t, poly16x4_t, poly16x8_t, '3', '2', ' let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + compose: + - FnCall: [static_assert_uimm_bits!, [LANE1, '{type[3]}']] + - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] + - Identifier: ["{type[5]}", Symbol] + - Identifier: ["{type[6]}", Symbol] + - name: "vcopy{neon_type[0].laneq_nox}" doc: "Insert vector element from another vector element" arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] @@ -7251,20 +7277,38 @@ intrinsics: - [poly64x2_t, poly64x2_t, poly64x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - [float32x4_t, float32x4_t, float32x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - [float64x2_t, float64x2_t, float64x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [int8x8_t, int8x16_t, int8x8_t, '3', '4', ' let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [int16x4_t, int16x8_t, int16x4_t, '2', '3', ' let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [int32x2_t, int32x4_t, int32x2_t, '1', '2', ' let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [uint8x8_t, uint8x16_t, uint8x8_t, '3', '4', ' let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [uint16x4_t, uint16x8_t, uint16x4_t, '2', '3', ' let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [uint32x2_t, uint32x4_t, uint32x2_t, '1', '2', 'let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [poly8x8_t, poly8x16_t, poly8x8_t, '3', '4', ' let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [poly16x4_t, poly16x8_t, poly16x4_t, '2', '3', ' let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [float32x2_t, float32x4_t, float32x2_t, '1', '2', ' let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, '{type[3]}']] - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] - Identifier: ["{type[5]}", Symbol] + - name: "vcopy{neon_type[0].laneq_nox}" + doc: "Insert vector element from another vector element" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[2]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [mov, 'LANE1 = 0', 'LANE2 = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['1', '3']] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + static_defs: ['const LANE1: i32, const LANE2: i32'] + safety: + unsafe: [neon] + types: + - [int8x8_t, int8x16_t, int8x8_t, '3', '4', ' let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int16x4_t, int16x8_t, int16x4_t, '2', '3', ' let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int32x2_t, int32x4_t, int32x2_t, '1', '2', ' let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint8x8_t, uint8x16_t, uint8x8_t, '3', '4', ' let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint16x4_t, uint16x8_t, uint16x4_t, '2', '3', ' let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint32x2_t, uint32x4_t, uint32x2_t, '1', '2', 'let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly8x8_t, poly8x16_t, poly8x8_t, '3', '4', ' let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly16x4_t, poly16x8_t, poly16x4_t, '2', '3', ' let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float32x2_t, float32x4_t, float32x2_t, '1', '2', ' let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + compose: + - FnCall: [static_assert_uimm_bits!, [LANE1, '{type[3]}']] + - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] + - Identifier: ["{type[5]}", Symbol] + - Identifier: ["{type[6]}", Symbol] + - name: "vcopyq_lane_{neon_type[0]}" doc: "Insert vector element from another vector element" arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] @@ -7277,14 +7321,15 @@ intrinsics: safety: unsafe: [neon] types: - - [int64x2_t, int64x1_t, 'let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [uint64x2_t, uint64x1_t, 'let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [poly64x2_t, poly64x1_t, 'let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] - - [float64x2_t, float64x1_t, ' let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int64x2_t, int64x1_t, 'let b: int64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint64x2_t, uint64x1_t, 'let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly64x2_t, poly64x1_t, 'let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float64x2_t, float64x1_t, ' let b: float64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, '1']] - FnCall: [static_assert!, ['LANE2 == 0']] - Identifier: ['{type[2]}', Symbol] + - Identifier: ['{type[3]}', Symbol] - name: "vcopyq_lane_f32" doc: "Insert vector element from another vector element" @@ -7298,11 +7343,12 @@ intrinsics: safety: unsafe: [neon] types: - - [float32x4_t, float32x2_t, ' let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float32x4_t, float32x2_t, ' let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, 2]] - FnCall: [static_assert_uimm_bits!, [LANE2, 1]] - Identifier: ["{type[2]}", Symbol] + - Identifier: ["{type[3]}", Symbol] - name: "vcreate_f64" doc: "Insert vector element from another vector element" @@ -7646,6 +7692,73 @@ intrinsics: - link: "llvm.aarch64.crypto.sha512su1" arch: aarch64,arm64ec + - name: "vsm3tt{type[0]}" + doc: "{type[3]}" + arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [target_feature, ['enable = "neon,sm4"']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, ['{type[2]}', 'IMM2 = 0']]}]] + - FnCall: [rustc_legacy_const_generics, ['3']] + - FnCall: [unstable, ['feature = "stdarch_neon_sm4"', 'issue = "117226"']] + static_defs: ["const IMM2: i32"] + safety: + unsafe: [neon] + types: + - ['1aq_u32', uint32x4_t, 'sm3tt1a', 'SM3TT1A'] + - ['1bq_u32', uint32x4_t, 'sm3tt1b', 'SM3TT1B'] + - ['2aq_u32', uint32x4_t, 'sm3tt2a', 'SM3TT2A'] + - ['2bq_u32', uint32x4_t, 'sm3tt2b', 'SM3TT2B'] + compose: + - FnCall: ["static_assert_uimm_bits!", [IMM2, "2"]] + - LLVMLink: + name: "_vsm3tt{type[0]}" + arguments: + - "a: {neon_type[1]}" + - "b: {neon_type[1]}" + - "c: {neon_type[1]}" + - "n: i64" + links: + - link: "llvm.aarch64.crypto.{type[2]}" + arch: aarch64,arm64ec + - FnCall: + - "_vsm3tt{type[0]}" + - - "a.as_signed()" + - "b.as_signed()" + - "c.as_signed()" + - "IMM2 as i64" + + - name: "vxarq_u64" + doc: "Exclusive OR and rotate" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - FnCall: [target_feature, ['enable = "neon,sha3"']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, ['xar', 'IMM6 = 0']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + - FnCall: [stable, ['feature = "stdarch_neon_sha3"', 'since = "1.79.0"']] + static_defs: ["const IMM6: i32"] + safety: + unsafe: [neon] + types: + - uint64x2_t + compose: + - FnCall: ["static_assert_uimm_bits!", [IMM6, "6"]] + - LLVMLink: + name: "_vxarq_u64" + arguments: + - "a: {neon_type}" + - "b: {neon_type}" + - "n: i64" + links: + - link: "llvm.aarch64.crypto.xar" + arch: aarch64,arm64ec + - FnCall: + - "_vxarq_u64" + - - "a.as_signed()" + - "b.as_signed()" + - "IMM6 as i64" + - name: "vrnd32x{neon_type.no}" doc: "Floating-point round to 32-bit integer, using current rounding mode" arguments: ["a: {neon_type}"] @@ -9850,3 +9963,1690 @@ intrinsics: - transmute - - FnCall: ["vld4{type[2]}", [{FnCall: [transmute, [a]]}]] + - name: "vtbx4{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, int8x8x4_t] + compose: + - FnCall: + - "vqtbx2" + - - FnCall: [transmute, [a]] + - FnCall: + - transmute + - - FnCall: ["vcombine{neon_type[0].noq}", ["b.0", "b.1"]] + - FnCall: + - transmute + - - FnCall: ["vcombine{neon_type[0].noq}", ["b.2", "b.3"]] + - FnCall: [transmute, [c]] + + - name: "vtbx4{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8_t, uint8x8x4_t, uint8x8_t] + - [poly8x8_t, poly8x8x4_t, uint8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - "vqtbx2" + - - FnCall: [transmute, [a]] + - FnCall: + - transmute + - - FnCall: ["vcombine{neon_type[0].noq}", ["b.0", "b.1"]] + - FnCall: + - transmute + - - FnCall: ["vcombine{neon_type[0].noq}", ["b.2", "b.3"]] + - c + + - name: "vtbl1{neon_type[0].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'int8x8_t', 'transmute(b)'] + - [uint8x8_t, 'uint8x8_t', 'b'] + - [poly8x8_t, 'uint8x8_t', 'b'] + compose: + - FnCall: + - 'vqtbl1{neon_type[0].no}' + - - FnCall: + - 'vcombine{neon_type[0].no}' + - - a + - 'crate::mem::zeroed()' + - Identifier: ['{type[2]}', Symbol] + + - name: "vtbl2{neon_type[1].noq}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8x2_t, 'int8x8_t'] + compose: + - FnCall: + - vqtbl1 + - - FnCall: + - transmute + - - FnCall: + - 'vcombine{neon_type[1].noq}' + - - 'a.0' + - 'a.1' + - FnCall: [transmute, [b]] + + - name: "vtbl2{neon_type[2].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[2]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8x2_t, 'uint8x8_t', 'uint8x8_t'] + - [poly8x8x2_t, 'uint8x8_t', 'poly8x8_t'] + compose: + - FnCall: + - transmute + - - FnCall: + - vqtbl1 + - - FnCall: + - transmute + - - FnCall: + - 'vcombine{neon_type[2].noq}' + - - 'a.0' + - 'a.1' + - b + + - name: "vtbl3{neon_type[1].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8x3_t, 'int8x8_t', 'int8x16x2'] + compose: + - Let: + - x + - FnCall: + - '{type[2]}_t' + - - FnCall: ['vcombine{neon_type[1].no}', ['a.0', 'a.1']] + - FnCall: ['vcombine{neon_type[1].no}', ['a.2', 'crate::mem::zeroed()']] + - FnCall: + - transmute + - - FnCall: + - vqtbl2 + - - FnCall: [transmute, ['x.0']] + - FnCall: [transmute, ['x.1']] + - FnCall: [transmute, [b]] + + - name: "vtbl3{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8x3_t, 'uint8x8_t', 'uint8x16x2', 'uint8x8_t'] + - [poly8x8x3_t, 'uint8x8_t', 'poly8x16x2', 'poly8x8_t'] + big_endian_inverse: true + compose: + - Let: + - x + - FnCall: + - '{type[2]}_t' + - - FnCall: ['vcombine{neon_type[3].no}', ['a.0', 'a.1']] + - FnCall: ['vcombine{neon_type[3].no}', ['a.2', 'crate::mem::zeroed()']] + - FnCall: + - transmute + - - FnCall: + - vqtbl2 + - - FnCall: [transmute, ['x.0']] + - FnCall: [transmute, ['x.1']] + - b + + - name: "vtbl4{neon_type[1].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8x4_t, 'int8x8_t', 'int8x16x2'] + compose: + - Let: + - x + - FnCall: + - '{type[2]}_t' + - - FnCall: ['vcombine{neon_type[1].no}', ['a.0', 'a.1']] + - FnCall: ['vcombine{neon_type[1].no}', ['a.2', 'a.3']] + - FnCall: + - transmute + - - FnCall: + - 'vqtbl2' + - - FnCall: [transmute, ['x.0']] + - FnCall: [transmute, ['x.1']] + - FnCall: [transmute, [b]] + + - name: "vtbl4{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8x4_t, 'uint8x8_t', 'uint8x16x2', 'uint8x8_t'] + - [poly8x8x4_t, 'uint8x8_t', 'poly8x16x2', 'poly8x8_t'] + big_endian_inverse: true + compose: + - Let: + - x + - FnCall: + - '{type[2]}_t' + - - FnCall: ['vcombine{neon_type[3].no}', ['a.0', 'a.1']] + - FnCall: ['vcombine{neon_type[3].no}', ['a.2', 'a.3']] + - FnCall: + - transmute + - - FnCall: + - 'vqtbl2' + - - FnCall: [transmute, ['x.0']] + - FnCall: [transmute, ['x.1']] + - b + + - name: "vqtbx1{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, int8x16_t, uint8x8_t, vqtbx1] + - [int8x16_t, int8x16_t, uint8x16_t, vqtbx1q] + compose: + - FnCall: ['{type[3]}', [a, b, c]] + + - name: "vqtbx1{type[4]}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8_t, "uint8x16_t", uint8x8_t, "vqtbx1", "_u8"] + - [poly8x8_t, "poly8x16_t", uint8x8_t, "vqtbx1", "_p8"] + - [uint8x16_t, "uint8x16_t", uint8x16_t, "vqtbx1q", "q_u8"] + - [poly8x16_t, "poly8x16_t", uint8x16_t, "vqtbx1q", "q_p8"] + compose: + - Let: + - x + - FnCall: + - transmute + - - FnCall: + - "{type[3]}" + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + - c + - Identifier: [x, Symbol] + + - name: "vtbx1{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}", "c: {neon_type[1]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, "int8x8_t", "transmute(c)", "i8x8::splat(8)", "int8x8"] + - [uint8x8_t, "uint8x8_t", "c", "u8x8::splat(8)", "uint8x8"] + - [poly8x8_t, "uint8x8_t", "c", "u8x8::splat(8)", "uint8x8"] + compose: + - FnCall: + - simd_select + - - FnCall: + - "simd_lt::<{type[4]}_t, int8x8_t>" + - - c + - FnCall: [transmute, ["{type[3]}"]] + - FnCall: + - transmute + - - FnCall: + - "vqtbx1" + - - "transmute(a)" + - FnCall: + - transmute + - - FnCall: ["vcombine{neon_type[0].no}", [b, "crate::mem::zeroed()"]] + - "{type[2]}" + - a + + - name: "vtbx2{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'int8x8x2_t'] + compose: + - FnCall: + - vqtbx1 + - - FnCall: [transmute, [a]] + - FnCall: + - transmute + - - FnCall: ["vcombine{neon_type[0].no}", ['b.0', 'b.1']] + - FnCall: [transmute, [c]] + + - name: "vtbx2{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8_t, 'uint8x8x2_t', uint8x8_t] + - [poly8x8_t, 'poly8x8x2_t', uint8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - vqtbx1 + - - FnCall: [transmute, [a]] + - FnCall: + - transmute + - - FnCall: ["vcombine{neon_type[0].no}", ['b.0', 'b.1']] + - c + + - name: "vtbx3{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'int8x8x3_t', 'int8x16x2', 'i8x8::splat(24)', 'int8x8'] + compose: + - Let: + - x + - FnCall: + - '{type[2]}_t' + - - FnCall: ['vcombine{neon_type[0].no}', ['b.0', 'b.1']] + - FnCall: ['vcombine{neon_type[0].no}', ['b.2', 'crate::mem::zeroed()']] + - FnCall: + - transmute + - - FnCall: + - simd_select + - - FnCall: + - 'simd_lt::<{type[4]}_t, int8x8_t>' + - - FnCall: [transmute, [c]] + - FnCall: [transmute, ['{type[3]}']] + - FnCall: + - transmute + - - FnCall: + - 'vqtbx2' + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ['x.0']] + - FnCall: [transmute, ['x.1']] + - FnCall: [transmute, [c]] + - a + + - name: "vtbx3{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: uint8x8_t"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8_t, 'uint8x8x3_t', 'uint8x16x2', 'u8x8::splat(24)', 'uint8x8'] + - [poly8x8_t, 'poly8x8x3_t', 'poly8x16x2', 'u8x8::splat(24)', 'poly8x8'] + big_endian_inverse: true + compose: + - Let: + - x + - FnCall: + - '{type[2]}_t' + - - FnCall: ['vcombine{neon_type[0].no}', ['b.0', 'b.1']] + - FnCall: ['vcombine{neon_type[0].no}', ['b.2', 'crate::mem::zeroed()']] + - FnCall: + - transmute + - - FnCall: + - simd_select + - - FnCall: + - 'simd_lt::<{type[4]}_t, int8x8_t>' + - - FnCall: [transmute, [c]] + - FnCall: [transmute, ['{type[3]}']] + - FnCall: + - transmute + - - FnCall: + - 'vqtbx2' + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ['x.0']] + - FnCall: [transmute, ['x.1']] + - c + - a + + - name: "vqtbl1{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['int8x16_t', uint8x8_t, 'vqtbl1', 'int8x8_t'] + - ['int8x16_t', uint8x16_t, 'vqtbl1q', 'int8x16_t'] + compose: + - FnCall: ['{type[2]}', ['a', b]] + + - name: "vqtbl1{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['uint8x16_t', uint8x8_t, 'vqtbl1', 'uint8x8_t'] + - ['poly8x16_t', uint8x8_t, 'vqtbl1', 'poly8x8_t'] + - ['uint8x16_t', uint8x16_t, 'vqtbl1q', 'uint8x16_t'] + - ['poly8x16_t', uint8x16_t, 'vqtbl1q', 'poly8x16_t'] + compose: + - Let: + - x + - FnCall: + - transmute + - - FnCall: + - '{type[2]}' + - - FnCall: [transmute, ['a']] + - b + - Identifier: [x, Symbol] + + - name: "vqtbl2{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['int8x16x2_t', uint8x8_t, 'vqtbl2', 'int8x8_t'] + - ['int8x16x2_t', uint8x16_t, 'vqtbl2q', 'int8x16_t'] + compose: + - FnCall: ['{type[2]}', ['a.0', 'a.1', b]] + + - name: "vqtbl2{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['uint8x16x2_t', uint8x8_t, 'vqtbl2', 'uint8x8_t'] + - ['uint8x16x2_t', uint8x16_t, 'vqtbl2q', 'uint8x16_t'] + - ['poly8x16x2_t', uint8x8_t, 'vqtbl2', 'poly8x8_t'] + - ['poly8x16x2_t', uint8x16_t, 'vqtbl2q', 'poly8x16_t'] + compose: + - FnCall: + - transmute + - - FnCall: + - '{type[2]}' + - - FnCall: [transmute, ['a.0']] + - FnCall: [transmute, ['a.1']] + - b + + - name: "vqtbx2{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'int8x16x2_t', uint8x8_t, 'vqtbx2'] + - [int8x16_t, 'int8x16x2_t', uint8x16_t, 'vqtbx2q'] + compose: + - FnCall: ['{type[3]}', [a, 'b.0', 'b.1', c]] + + - name: "vqtbx2{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8_t, 'uint8x16x2_t', uint8x8_t, 'vqtbx2'] + - [uint8x16_t, 'uint8x16x2_t', uint8x16_t, 'vqtbx2q'] + - [poly8x8_t, 'poly8x16x2_t', uint8x8_t, 'vqtbx2'] + - [poly8x16_t, 'poly8x16x2_t', uint8x16_t, 'vqtbx2q'] + compose: + - FnCall: + - transmute + - - FnCall: + - '{type[3]}' + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ['b.0']] + - FnCall: [transmute, ['b.1']] + - c + + - name: "vqtbl3{neon_type[0].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['int8x8_t', 'int8x16x3_t', uint8x8_t, 'vqtbl3'] + - ['int8x16_t', 'int8x16x3_t', uint8x16_t, 'vqtbl3q'] + compose: + - FnCall: ['{type[3]}', ['a.0', 'a.1', 'a.2', b]] + + - name: "vqtbl3{neon_type[0].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['uint8x8_t', 'uint8x16x3_t', uint8x8_t, 'vqtbl3'] + - ['uint8x16_t','uint8x16x3_t', uint8x16_t, 'vqtbl3q'] + - ['poly8x8_t', 'poly8x16x3_t', uint8x8_t, 'vqtbl3'] + - ['poly8x16_t','poly8x16x3_t', uint8x16_t, 'vqtbl3q'] + compose: + - FnCall: + - transmute + - - FnCall: + - '{type[3]}' + - - FnCall: [transmute, ['a.0']] + - FnCall: [transmute, ['a.1']] + - FnCall: [transmute, ['a.2']] + - b + + - name: "vqtbx3{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'int8x16x3_t', uint8x8_t, 'vqtbx3'] + - [int8x16_t, 'int8x16x3_t', uint8x16_t, 'vqtbx3q'] + compose: + - FnCall: ['{type[3]}', [a, 'b.0', 'b.1', 'b.2', c]] + + - name: "vqtbx3{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8_t, 'uint8x16x3_t', uint8x8_t, 'vqtbx3'] + - [uint8x16_t, 'uint8x16x3_t', uint8x16_t, 'vqtbx3q'] + - [poly8x8_t, 'poly8x16x3_t', uint8x8_t, 'vqtbx3'] + - [poly8x16_t, 'poly8x16x3_t', uint8x16_t, 'vqtbx3q'] + compose: + - FnCall: + - transmute + - - FnCall: + - '{type[3]}' + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ['b.0']] + - FnCall: [transmute, ['b.1']] + - FnCall: [transmute, ['b.2']] + - c + + - name: "vqtbl4{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['int8x16x4_t', uint8x8_t, 'vqtbl4', 'int8x8_t'] + - ['int8x16x4_t', uint8x16_t, 'vqtbl4q', 'int8x16_t'] + compose: + - FnCall: ['{type[2]}', ['a.0', 'a.1', 'a.2', 'a.3', b]] + + - name: "vqtbl4{neon_type[3].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['uint8x16x4_t', uint8x8_t, 'vqtbl4', 'uint8x8_t'] + - ['uint8x16x4_t', uint8x16_t, 'vqtbl4q', 'uint8x16_t'] + - ['poly8x16x4_t', uint8x8_t, 'vqtbl4', 'poly8x8_t'] + - ['poly8x16x4_t', uint8x16_t, 'vqtbl4q', 'poly8x16_t'] + compose: + - FnCall: + - transmute + - - FnCall: + - '{type[2]}' + - - FnCall: [transmute, ['a.0']] + - FnCall: [transmute, ['a.1']] + - FnCall: [transmute, ['a.2']] + - FnCall: [transmute, ['a.3']] + - b + + - name: "vqtbx4{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'int8x16x4_t', uint8x8_t, 'vqtbx4'] + - [int8x16_t, 'int8x16x4_t', uint8x16_t, 'vqtbx4q'] + compose: + - FnCall: ['{type[3]}', [a, 'b.0', 'b.1', 'b.2', 'b.3', c]] + + - name: "vqtbx4{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [uint8x8_t, 'uint8x16x4_t', uint8x8_t, 'vqtbx4'] + - [uint8x16_t, 'uint8x16x4_t', uint8x16_t, 'vqtbx4q'] + - [poly8x8_t, 'poly8x16x4_t', uint8x8_t, 'vqtbx4'] + - [poly8x16_t, 'poly8x16x4_t', uint8x16_t, 'vqtbx4q'] + compose: + - FnCall: + - transmute + - - FnCall: + - '{type[3]}' + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ['b.0']] + - FnCall: [transmute, ['b.1']] + - FnCall: [transmute, ['b.2']] + - FnCall: [transmute, ['b.3']] + - c + + - name: "{type[0]}" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ["vqtbl1", "int8x16_t", "uint8x8_t", "int8x8_t"] + - ["vqtbl1q", "int8x16_t", "uint8x16_t", "int8x16_t"] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbl1.{neon_type[3]}" + arch: aarch64,arm64ec + + - name: "{type[0]}" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ["vqtbl2", "int8x16_t", "uint8x8_t", "int8x8_t"] + - ["vqtbl2q", "int8x16_t", "uint8x16_t", "int8x16_t"] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbl2.{neon_type[3]}" + arch: aarch64,arm64ec + + - name: "{type[0]}" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[1]}", "d: {neon_type[2]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ["vqtbl3", int8x16_t, uint8x8_t, int8x8_t] + - ["vqtbl3q", int8x16_t, uint8x16_t, int8x16_t] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbl3.{neon_type[3]}" + arch: aarch64,arm64ec + + - name: "{type[0]}" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[1]}", "d: {neon_type[1]}", "e: {neon_type[2]}"] + return_type: "{neon_type[3]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ["vqtbl4", int8x16_t, uint8x8_t, int8x8_t] + - ["vqtbl4q", int8x16_t, uint8x16_t, int8x16_t] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbl4.{neon_type[3]}" + arch: aarch64,arm64ec + + - name: "{type[0]}" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}", "c: {neon_type[3]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [vqtbx1, "int8x8_t", "int8x16_t", "uint8x8_t"] + - [vqtbx1q, "int8x16_t", "int8x16_t", "uint8x16_t"] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbx1.{neon_type[1]}" + arch: aarch64,arm64ec + + - name: "{type[0]}" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}", "c: {neon_type[2]}", "d: {neon_type[3]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [vqtbx2, "int8x8_t", "int8x16_t", "uint8x8_t"] + - [vqtbx2q, "int8x16_t", "int8x16_t", "uint8x16_t"] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbx2.{neon_type[1]}" + arch: aarch64,arm64ec + + - name: "{type[0]}" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}", "c: {neon_type[2]}", "d: {neon_type[2]}", "e: {neon_type[3]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [vqtbx3, "int8x8_t", "int8x16_t", "uint8x8_t"] + - [vqtbx3q, "int8x16_t", "int8x16_t", "uint8x16_t"] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbx3.{neon_type[1]}" + arch: aarch64,arm64ec + + - name: "{type[0]}" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type[1]}", "b: {neon_type[2]}", "c: {neon_type[2]}", "d: {neon_type[2]}", "e: {neon_type[2]}", "f: {neon_type[3]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - [vqtbx4, "int8x8_t", "int8x16_t", "uint8x8_t"] + - [vqtbx4q, "int8x16_t", "int8x16_t", "uint8x16_t"] + compose: + - LLVMLink: + name: "_{type[0]}" + links: + - link: "llvm.aarch64.neon.tbx4.{neon_type[1]}" + arch: aarch64,arm64ec + + - name: "vld1{neon_type[1].no}" + doc: "Load multiple single-element structures to one, two, three, or four registers" + arguments: ["ptr: {type[0]}"] + return_type: "{neon_type[1]}" + attr: + - FnCall: [target_feature, ['enable = "{type[2]}"']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [ldr]]}]] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['*const i8', int8x8_t, "neon"] + - ['*const i8', int8x16_t, "neon"] + - ['*const i16', int16x4_t, "neon"] + - ['*const i16', int16x8_t, "neon"] + - ['*const i32', int32x2_t, "neon"] + - ['*const i32', int32x4_t, "neon"] + - ['*const i64', int64x1_t, "neon"] + - ['*const i64', int64x2_t, "neon"] + - ['*const u8', uint8x8_t, "neon"] + - ['*const u8', uint8x16_t, "neon"] + - ['*const u16', uint16x4_t, "neon"] + - ['*const u16', uint16x8_t, "neon"] + - ['*const u32', uint32x2_t, "neon"] + - ['*const u32', uint32x4_t, "neon"] + - ['*const u64', uint64x1_t, "neon"] + - ['*const u64', uint64x2_t, "neon"] + - ['*const p8', poly8x8_t, "neon"] + - ['*const p8', poly8x16_t, "neon"] + - ['*const p16', poly16x4_t, "neon"] + - ['*const p16', poly16x8_t, "neon"] + - ['*const p64', poly64x1_t, "neon,aes"] + - ['*const p64', poly64x2_t, "neon,aes"] + - ['*const f32', float32x2_t, "neon"] + - ['*const f32', float32x4_t, "neon"] + - ['*const f64', float64x1_t, "neon"] + - ['*const f64', float64x2_t, "neon"] + compose: + - FnCall: + - 'crate::ptr::read_unaligned' + - - MethodCall: + - ptr + - cast + - [] + + - name: "vst1{neon_type[1].no}" + doc: "Store multiple single-element structures from one, two, three, or four registers." + arguments: ["ptr: {type[0]}", "a: {neon_type[1]}"] + attr: + - FnCall: [target_feature, ['enable = "{type[2]}"']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [str]]}]] + - FnCall: [allow, ['clippy::cast_ptr_alignment']] + - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + safety: + unsafe: [neon] + types: + - ['*mut i8', int8x8_t, "neon"] + - ['*mut i8', int8x16_t, "neon"] + - ['*mut i16', int16x4_t, "neon"] + - ['*mut i16', int16x8_t, "neon"] + - ['*mut i32', int32x2_t, "neon"] + - ['*mut i32', int32x4_t, "neon"] + - ['*mut i64', int64x1_t, "neon"] + - ['*mut i64', int64x2_t, "neon"] + - ['*mut u8', uint8x8_t, "neon"] + - ['*mut u8', uint8x16_t, "neon"] + - ['*mut u16', uint16x4_t, "neon"] + - ['*mut u16', uint16x8_t, "neon"] + - ['*mut u32', uint32x2_t, "neon"] + - ['*mut u32', uint32x4_t, "neon"] + - ['*mut u64', uint64x1_t, "neon"] + - ['*mut u64', uint64x2_t, "neon"] + - ['*mut p8', poly8x8_t, "neon"] + - ['*mut p8', poly8x16_t, "neon"] + - ['*mut p16', poly16x4_t, "neon"] + - ['*mut p16', poly16x8_t, "neon"] + - ['*mut p64', poly64x1_t, "neon,aes"] + - ['*mut p64', poly64x2_t, "neon,aes"] + - ['*mut f32', float32x2_t, "neon"] + - ['*mut f32', float32x4_t, "neon"] + - ['*mut f64', float64x1_t, "neon"] + - ['*mut f64', float64x2_t, "neon"] + compose: + - FnCall: + - 'crate::ptr::write_unaligned' + - - MethodCall: + - ptr + - cast + - [] + - a + + - name: "__crc32d" + doc: "CRC32 single round checksum for quad words (64 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *target-not-arm + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32x"]] }]] + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - [u32, u64] + compose: + - LLVMLink: + name: "crc32x" + arguments: + - "crc: u32" + - "data: u64" + links: + - link: "llvm.aarch64.crc32x" + arch: aarch64,arm64ec + + - name: "__crc32cd" + doc: "CRC32-C single round checksum for quad words (64 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *target-not-arm + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32cx"]] }]] + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - [u32, u64] + compose: + - LLVMLink: + name: "crc32cx" + arguments: + - "crc: u32" + - "data: u64" + links: + - link: "llvm.aarch64.crc32cx" + arch: aarch64,arm64ec + + - name: "{type[0]}" + doc: "Absolute Value (wrapping)." + arguments: ["a: {type[1]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [abs] + safety: + unsafe: [neon] + types: + - ['vabsd_s64', i64, i64] + - ['vabs_s64', int64x1_t, v1i64] + - ['vabsq_s64', int64x2_t, v2i64] + compose: + - LLVMLink: + name: "{type[0]}" + links: + - link: "llvm.aarch64.neon.abs.{type[2]}" + arch: aarch64,arm64ec + + - name: "vuqadd{neon_type[0].no}" + doc: "Signed saturating Accumulate of Unsigned value." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[0]}" + attr: + - *neon-stable + assert_instr: [suqadd] + safety: + unsafe: [neon] + types: + - [int8x8_t, uint8x8_t] + - [int8x16_t, uint8x16_t] + - [int16x4_t, uint16x4_t] + - [int16x8_t, uint16x8_t] + - [int32x2_t, uint32x2_t] + - [int32x4_t, uint32x4_t] + - [int64x1_t, uint64x1_t] + - [int64x2_t, uint64x2_t] + compose: + - LLVMLink: + name: "vuqadd{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.suqadd.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vsqadd{neon_type[0].no}" + doc: "Unsigned saturating Accumulate of Signed value." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[0]}" + attr: + - *neon-stable + assert_instr: [usqadd] + safety: + unsafe: [neon] + types: + - [uint8x8_t, int8x8_t] + - [uint8x16_t, int8x16_t] + - [uint16x4_t, int16x4_t] + - [uint16x8_t, int16x8_t] + - [uint32x2_t, int32x2_t] + - [uint32x4_t, int32x4_t] + - [uint64x1_t, int64x1_t] + - [uint64x2_t, int64x2_t] + compose: + - LLVMLink: + name: "vsqadd{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.usqadd.{neon_type[1]}" + arch: aarch64,arm64ec + + - name: "vpadd{neon_type.no}" + doc: "Add Pairwise" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - int8x16_t + - int16x8_t + - int32x4_t + - int64x2_t + compose: + - LLVMLink: + name: "vpadd{neon_type.no}" + links: + - link: "llvm.aarch64.neon.addp.{neon_type}" + arch: aarch64,arm64ec + + - name: "vpadd{neon_type[0].no}" + doc: "Add Pairwise" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - [uint8x16_t, int8x16_t] + - [uint16x8_t, int16x8_t] + - [uint32x4_t, int32x4_t] + - [uint64x2_t, int64x2_t] + compose: + - FnCall: + - transmute + - - FnCall: + - 'vpadd{neon_type[1].no}' + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + + - name: "vpaddd_s64" + doc: "Add pairwise" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - [int64x2_t, i64] + compose: + - FnCall: + - transmute + - - FnCall: + - "vaddvq_u64" + - - FnCall: [transmute, [a]] + + - name: "vpaddd_u64" + doc: "Add pairwise" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - [uint64x2_t, u64] + compose: + - FnCall: [vaddvq_u64, [a]] + + - name: "vaddv{neon_type[0].no}" + doc: "Add across vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addv] + safety: + unsafe: [neon] + types: + - [int8x8_t, i8, i32] + - [int16x4_t, i16, i32] + - [int8x16_t, i8, i32] + - [int16x8_t, i16, i32] + - [int32x4_t, i32, i32] + compose: + - LLVMLink: + name: "vaddv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.saddv.{type[2]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vaddv{neon_type[0].no}" + doc: "Add across vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - [int32x2_t, i32] + compose: + - LLVMLink: + name: "vaddv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.saddv.i32.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vaddv{neon_type[0].no}" + doc: "Add across vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - [int64x2_t, i64] + compose: + - LLVMLink: + name: "vaddv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.saddv.i64.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vaddv{neon_type[0].no}" + doc: "Add across vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addv] + safety: + unsafe: [neon] + types: + - [uint8x8_t, u8, i32] + - [uint16x4_t, u16, i32] + - [uint8x16_t, u8, i32] + - [uint16x8_t, u16, i32] + - [uint32x4_t, u32, i32] + compose: + - LLVMLink: + name: "vaddv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.uaddv.{type[2]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vaddv{neon_type[0].no}" + doc: "Add across vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - [uint32x2_t, u32, i32] + compose: + - LLVMLink: + name: "vaddv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.uaddv.{type[2]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vaddv{neon_type[0].no}" + doc: "Add across vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [addp] + safety: + unsafe: [neon] + types: + - [uint64x2_t, u64, i64] + compose: + - LLVMLink: + name: "vaddv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.uaddv.{type[2]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vaddlv{neon_type[0].no}" + doc: "Signed Add Long across Vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [saddlv] + safety: + unsafe: [neon] + types: + - [int8x8_t, i16] + - [int8x16_t, i16] + compose: + - LLVMLink: + name: "vaddlv{neon_type[0].no}" + return_type: "i32" + links: + - link: "llvm.aarch64.neon.saddlv.i32.{neon_type[0]}" + arch: aarch64,arm64ec + - Identifier: ["_vaddlv{neon_type[0].no}(a) as i16", Symbol] + + - name: "vaddlv{neon_type[0].no}" + doc: "Unsigned Add Long across Vector" + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [uaddlv] + safety: + unsafe: [neon] + types: + - [uint8x8_t, u16] + - [uint8x16_t, u16] + compose: + - LLVMLink: + name: "vaddlv{neon_type[0].no}" + return_type: "i32" + links: + - link: "llvm.aarch64.neon.uaddlv.i32.{neon_type[0]}" + arch: aarch64,arm64ec + - Identifier: ["_vaddlv{neon_type[0].no}(a.as_signed()).as_unsigned() as u16", Symbol] + + - name: "vmaxv{neon_type[0].no}" + doc: "Horizontal vector max." + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: ['{type[2]}'] + safety: + unsafe: [neon] + types: + - [int8x8_t, i8, 'smaxv'] + - [int16x4_t, i16, 'smaxv'] + - [int32x2_t, i32, 'smaxp'] + - [int8x16_t, i8, 'smaxv'] + - [int16x8_t, i16, 'smaxv'] + - [int32x4_t, i32, 'smaxv'] + compose: + - LLVMLink: + name: "vmaxv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.smaxv.{type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vmaxv{neon_type[0].no}" + doc: "Horizontal vector max." + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: ['{type[2]}'] + safety: + unsafe: [neon] + types: + - [uint8x8_t, u8, 'umaxv'] + - [uint16x4_t, u16, 'umaxv'] + - [uint32x2_t, u32, 'umaxp'] + - [uint8x16_t, u8, 'umaxv'] + - [uint16x8_t, u16, 'umaxv'] + - [uint32x4_t, u32, 'umaxv'] + compose: + - LLVMLink: + name: "vmaxv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.umaxv.{type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vmaxv{neon_type[0].no}" + doc: "Horizontal vector max." + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: ['{type[2]}'] + safety: + unsafe: [neon] + types: + - [float32x2_t, f32, 'fmaxp'] + - [float32x4_t, f32, 'fmaxv'] + - [float64x2_t, f64, 'fmaxp'] + compose: + - LLVMLink: + name: "vmaxv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.fmaxv.{type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vminv{neon_type[0].no}" + doc: "Horizontal vector min." + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: ['{type[2]}'] + safety: + unsafe: [neon] + types: + - [int8x8_t, i8, 'sminv'] + - [int16x4_t, i16, 'sminv'] + - [int32x2_t, i32, 'sminp'] + - [int8x16_t, i8, 'sminv'] + - [int16x8_t, i16, 'sminv'] + - [int32x4_t, i32, 'sminv'] + compose: + - LLVMLink: + name: "vminv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.sminv.{type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vminv{neon_type[0].no}" + doc: "Horizontal vector min." + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: ['{type[2]}'] + safety: + unsafe: [neon] + types: + - [uint8x8_t, u8, 'uminv'] + - [uint16x4_t, u16, 'uminv'] + - [uint32x2_t, u32, 'uminp'] + - [uint8x16_t, u8, 'uminv'] + - [uint16x8_t, u16, 'uminv'] + - [uint32x4_t, u32, 'uminv'] + compose: + - LLVMLink: + name: "vminv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.uminv.{type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vminv{neon_type[0].no}" + doc: "Horizontal vector min." + arguments: ["a: {neon_type[0]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: ['{type[2]}'] + safety: + unsafe: [neon] + types: + - [float32x2_t, f32, 'fminp'] + - [float32x4_t, f32, 'fminv'] + - [float64x2_t, f64, 'fminp'] + compose: + - LLVMLink: + name: "vminv{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.fminv.{type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + + - name: "vpmin{neon_type.no}" + doc: "Folding minimum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-stable + assert_instr: ['sminp'] + safety: + unsafe: [neon] + types: + - int8x16_t + - int16x8_t + - int32x4_t + compose: + - LLVMLink: + name: "vpmin{neon_type.no}" + links: + - link: "llvm.aarch64.neon.sminp.{neon_type}" + arch: aarch64,arm64ec + + - name: "vpmin{neon_type.no}" + doc: "Folding minimum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-stable + assert_instr: ['uminp'] + safety: + unsafe: [neon] + types: + - uint8x16_t + - uint16x8_t + - uint32x4_t + compose: + - LLVMLink: + name: "vpmin{neon_type.no}" + links: + - link: "llvm.aarch64.neon.uminp.{neon_type}" + arch: aarch64,arm64ec + + - name: "vpmin{neon_type.no}" + doc: "Folding minimum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-stable + assert_instr: ['fminp'] + safety: + unsafe: [neon] + types: + - float32x4_t + - float64x2_t + compose: + - LLVMLink: + name: "vpmin{neon_type.no}" + links: + - link: "llvm.aarch64.neon.fminp.{neon_type}" + arch: aarch64,arm64ec + + - name: "vpmax{neon_type.no}" + doc: "Folding maximum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-stable + assert_instr: ['smaxp'] + safety: + unsafe: [neon] + types: + - int8x16_t + - int16x8_t + - int32x4_t + compose: + - LLVMLink: + name: "vpmax{neon_type.no}" + links: + - link: "llvm.aarch64.neon.smaxp.{neon_type}" + arch: aarch64,arm64ec + + - name: "vpmax{neon_type.no}" + doc: "Folding maximum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-stable + assert_instr: ['umaxp'] + safety: + unsafe: [neon] + types: + - uint8x16_t + - uint16x8_t + - uint32x4_t + compose: + - LLVMLink: + name: "vpmax{neon_type.no}" + links: + - link: "llvm.aarch64.neon.umaxp.{neon_type}" + arch: aarch64,arm64ec + + - name: "vpmax{neon_type.no}" + doc: "Folding maximum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-stable + assert_instr: ['fmaxp'] + safety: + unsafe: [neon] + types: + - float32x4_t + - float64x2_t + compose: + - LLVMLink: + name: "vpmax{neon_type.no}" + links: + - link: "llvm.aarch64.neon.fmaxp.{neon_type}" + arch: aarch64,arm64ec + + - name: "vsli{neon_type[0].N}" + doc: "Shift Left and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sli, 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + - *neon-stable + static_defs: ['const N: i32'] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'static_assert_uimm_bits!', 'N, 3'] + - [int8x16_t, 'static_assert_uimm_bits!', 'N, 3'] + - [int16x4_t, 'static_assert_uimm_bits!', 'N, 4'] + - [int16x8_t, 'static_assert_uimm_bits!', 'N, 4'] + - [int32x2_t, 'static_assert!', 'N >= 0 && N <= 31'] + - [int32x4_t, 'static_assert!', 'N >= 0 && N <= 31'] + - [int64x1_t, 'static_assert!', 'N >= 0 && N <= 63'] + - [int64x2_t, 'static_assert!', 'N >= 0 && N <= 63'] + compose: + - FnCall: ['{type[1]}', ['{type[2]}']] + - LLVMLink: + name: "vsli{neon_type[0].N}" + arguments: + - "a: {neon_type[0]}" + - "b: {neon_type[0]}" + - "n: i32" + links: + - link: "llvm.aarch64.neon.vsli.{neon_type[0]}" + arch: aarch64,arm64ec + - FnCall: ["_vsli{neon_type[0].N}", [a, b, N]] + + - name: "vsli{neon_type[0].N}" + doc: "Shift Left and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "{type[4]}"']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sli, 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + - *neon-stable + static_defs: ['const N: i32'] + safety: + unsafe: [neon] + types: + - [uint8x8_t, int8x8_t, 'static_assert_uimm_bits!', 'N, 3', "neon"] + - [uint8x16_t, int8x16_t, 'static_assert_uimm_bits!', 'N, 3', "neon"] + - [uint16x4_t, int16x4_t, 'static_assert_uimm_bits!', 'N, 4', "neon"] + - [uint16x8_t, int16x8_t, 'static_assert_uimm_bits!', 'N, 4', "neon"] + - [uint32x2_t, int32x2_t, 'static_assert!', 'N >= 0 && N <= 31', "neon"] + - [uint32x4_t, int32x4_t, 'static_assert!', 'N >= 0 && N <= 31', "neon"] + - [uint64x1_t, int64x1_t, 'static_assert!', 'N >= 0 && N <= 63', "neon"] + - [uint64x2_t, int64x2_t, 'static_assert!', 'N >= 0 && N <= 63', "neon"] + - [poly8x8_t, int8x8_t, 'static_assert_uimm_bits!', 'N, 3', "neon"] + - [poly8x16_t, int8x16_t, 'static_assert_uimm_bits!', 'N, 3', "neon"] + - [poly16x4_t, int16x4_t, 'static_assert_uimm_bits!', 'N, 4', "neon"] + - [poly16x8_t, int16x8_t, 'static_assert_uimm_bits!', 'N, 4', "neon"] + - [poly64x1_t, int64x1_t, 'static_assert!', 'N >= 0 && N <= 63', "neon,aes"] + - [poly64x2_t, int64x2_t, 'static_assert!', 'N >= 0 && N <= 63', "neon,aes"] + compose: + - FnCall: ['{type[2]}', ['{type[3]}']] + - FnCall: + - transmute + - - FnCall: + - 'vsli{neon_type[1].N}::' + - - FnCall: + - transmute + - - a + - FnCall: + - transmute + - - b + + - name: "vsri{neon_type[0].N}" + doc: "Shift Right and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sri, 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + - *neon-stable + static_defs: ['const N: i32'] + safety: + unsafe: [neon] + types: + - [int8x8_t, 'N >= 1 && N <= 8'] + - [int8x16_t, 'N >= 1 && N <= 8'] + - [int16x4_t, 'N >= 1 && N <= 16'] + - [int16x8_t, 'N >= 1 && N <= 16'] + - [int32x2_t, 'N >= 1 && N <= 32'] + - [int32x4_t, 'N >= 1 && N <= 32'] + - [int64x1_t, 'N >= 1 && N <= 64'] + - [int64x2_t, 'N >= 1 && N <= 64'] + compose: + - FnCall: ['static_assert!', ['{type[1]}']] + - LLVMLink: + name: "vsri{neon_type[0].N}" + arguments: + - "a: {neon_type[0]}" + - "b: {neon_type[0]}" + - "n: i32" + links: + - link: "llvm.aarch64.neon.vsri.{neon_type[0]}" + arch: aarch64,arm64ec + - FnCall: ["_vsri{neon_type[0].N}", [a, b, N]] + + - name: "vsri{neon_type[0].N}" + doc: "Shift Right and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "{type[3]}"']] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sri, 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + - *neon-stable + static_defs: ['const N: i32'] + safety: + unsafe: [neon] + types: + - [uint8x8_t, int8x8_t, 'N >= 1 && N <= 8', "neon"] + - [uint8x16_t, int8x16_t, 'N >= 1 && N <= 8', "neon"] + - [uint16x4_t, int16x4_t, 'N >= 1 && N <= 16', "neon"] + - [uint16x8_t, int16x8_t, 'N >= 1 && N <= 16', "neon"] + - [uint32x2_t, int32x2_t, 'N >= 1 && N <= 32', "neon"] + - [uint32x4_t, int32x4_t, 'N >= 1 && N <= 32', "neon"] + - [uint64x1_t, int64x1_t, 'N >= 1 && N <= 64', "neon"] + - [uint64x2_t, int64x2_t, 'N >= 1 && N <= 64', "neon"] + - [poly8x8_t, int8x8_t, 'N >= 1 && N <= 8', "neon"] + - [poly8x16_t, int8x16_t, 'N >= 1 && N <= 8', "neon"] + - [poly16x4_t, int16x4_t, 'N >= 1 && N <= 16', "neon"] + - [poly16x8_t, int16x8_t, 'N >= 1 && N <= 16', "neon"] + - [poly64x1_t, int64x1_t, 'N >= 1 && N <= 64', "neon,aes"] + - [poly64x2_t, int64x2_t, 'N >= 1 && N <= 64', "neon,aes"] + compose: + - FnCall: ['static_assert!', ['{type[2]}']] + - FnCall: + - transmute + - - FnCall: + - 'vsri{neon_type[1].N}::' + - - FnCall: + - transmute + - - a + - FnCall: + - transmute + - - b diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index a3c26a709b..0967476950 100644 --- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -2,6 +2,8 @@ arch_cfgs: - arch_name: aarch64 target_feature: [neon] llvm_prefix: llvm.aarch64.neon +# Generate big endian shuffles +auto_big_endian: true # Repeatedly used anchors # #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -31,6 +33,9 @@ target-is-arm: &target-is-arm target-not-arm: &target-not-arm FnCall: [cfg, [{ FnCall: [not, ['target_arch = "arm"']]}]] +not-arm: ¬-arm + FnCall: [not, ['target_arch = "arm"']] + neon-target-aarch64-arm64ec: &neon-target-aarch64-arm64ec FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]] @@ -66,6 +71,12 @@ neon-unstable-i8mm: &neon-unstable-i8mm neon-unstable-fcma: &neon-unstable-fcma FnCall: [unstable, ['feature = "stdarch_neon_fcma"', 'issue = "117222"']] +arm-crc-unstable: &arm-crc-unstable + FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_aarch32_crc32"', 'issue = "125085"']]}]] + +aarch64-crc-stable: &aarch64-crc-stable + FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "stdarch_aarch64_crc32"', 'since = "1.80.0"']]}]] + intrinsics: - name: "vand{neon_type.no}" doc: Vector bitwise and @@ -612,24 +623,6 @@ intrinsics: - "vcls{neon_type[1].no}" - - FnCall: [transmute, [a]] - - name: "vclz{neon_type.no}" - doc: "Count leading zero bits" - arguments: ["a: {neon_type}"] - return_type: "{neon_type}" - attr: - - FnCall: [cfg_attr, [target_arch = "arm", {FnCall: [target_feature, ['enable = "v7"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, 'target_arch = "arm"']]}, {FnCall: [assert_instr, ['"vclz.i8"']]}]] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [any, ['target_arch = "aarch64"', 'target_arch = "arm64ec"']]}]]}, {FnCall: [assert_instr, [clz]]}]] - - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]] - - FnCall: [cfg_attr, ['target_arch = "arm"', {FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']]}]] - safety: - unsafe: [neon] - types: - - int8x8_t - - int8x16_t - compose: - - FnCall: ["vclz{neon_type.no}_", [a]] - - name: "vclz{neon_type[0].no}" doc: "Count leading zero bits" arguments: ["a: {neon_type[0]}"] @@ -649,7 +642,7 @@ intrinsics: - FnCall: - transmute - - FnCall: - - "vclz{neon_type[1].no}_" + - "vclz{neon_type[1].no}" - - FnCall: [transmute, [a]] - name: "vclz{neon_type[0].no}" @@ -665,12 +658,20 @@ intrinsics: safety: unsafe: [neon] types: + - [int8x8_t, '.i8'] + - [int8x16_t, '.i8'] - [int16x4_t, '.i16'] - [int16x8_t, '.i16'] - [int32x2_t, '.i32'] - [int32x4_t, '.i32'] compose: - - FnCall: ["vclz{neon_type[0].no}_", [a]] + - LLVMLink: + name: "vclz{neon_type[0].no}" + links: + - link: "llvm.ctlz.{neon_type[0]}" + arch: arm + - link: "llvm.ctlz.{neon_type[0]}" + arch: aarch64,arm64ec - name: "vclz{neon_type[0].no}" doc: "Count leading zero bits" @@ -693,7 +694,7 @@ intrinsics: - FnCall: - transmute - - FnCall: - - "vclz{neon_type[2].no}_" + - "vclz{neon_type[2].no}" - - FnCall: [transmute, [a]] - name: "vcagt{neon_type[0].no}" @@ -1281,14 +1282,15 @@ intrinsics: safety: unsafe: [neon] types: - - [int8x8_t, ' static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] - - [int16x8_t, ' static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] - - [uint8x8_t, ' static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] - - [uint16x8_t, ' static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] - - [poly8x8_t, ' static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] - - [poly16x8_t, ' static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [int8x8_t, ' static_assert_uimm_bits!(N, 3);', 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [int16x8_t, ' static_assert_uimm_bits!(N, 3);', 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [uint8x8_t, ' static_assert_uimm_bits!(N, 3);', 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [uint16x8_t, ' static_assert_uimm_bits!(N, 3);', 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [poly8x8_t, ' static_assert_uimm_bits!(N, 3);', 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [poly16x8_t, ' static_assert_uimm_bits!(N, 3);', 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] compose: - Identifier: ["{type[1]}", Symbol] + - Identifier: ["{type[2]}", Symbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1305,11 +1307,12 @@ intrinsics: safety: unsafe: [neon] types: - - [int8x16_t, ' static_assert_uimm_bits!(N, 4); match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] - - [uint8x16_t, ' static_assert_uimm_bits!(N, 4); match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] - - [poly8x16_t, ' static_assert_uimm_bits!(N, 4); match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] + - [int8x16_t, ' static_assert_uimm_bits!(N, 4);', 'match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] + - [uint8x16_t, ' static_assert_uimm_bits!(N, 4);', 'match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] + - [poly8x16_t, ' static_assert_uimm_bits!(N, 4);', 'match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] compose: - Identifier: ["{type[1]}", Symbol] + - Identifier: ["{type[2]}", Symbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1326,14 +1329,15 @@ intrinsics: safety: unsafe: [neon] types: - - [int16x4_t, 'static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] - - [int32x4_t, ' static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] - - [uint16x4_t, ' static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] - - [uint32x4_t, ' static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] - - [poly16x4_t, ' static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] - - [float32x4_t, ' static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [int16x4_t, 'static_assert_uimm_bits!(N, 2);', 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [int32x4_t, ' static_assert_uimm_bits!(N, 2);', 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [uint16x4_t, ' static_assert_uimm_bits!(N, 2);', 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [uint32x4_t, ' static_assert_uimm_bits!(N, 2);', 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [poly16x4_t, ' static_assert_uimm_bits!(N, 2);', 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [float32x4_t, ' static_assert_uimm_bits!(N, 2);', 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] compose: - Identifier: ["{type[1]}", Symbol] + - Identifier: ["{type[2]}", Symbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1350,11 +1354,12 @@ intrinsics: safety: unsafe: [neon] types: - - [int32x2_t, ' static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] - - [uint32x2_t, ' static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] - - [float32x2_t, ' static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [int32x2_t, ' static_assert_uimm_bits!(N, 1);', 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [uint32x2_t, ' static_assert_uimm_bits!(N, 1);', 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [float32x2_t, ' static_assert_uimm_bits!(N, 1);', 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] compose: - Identifier: ["{type[1]}", Symbol] + - Identifier: ["{type[2]}", Symbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1371,10 +1376,11 @@ intrinsics: safety: unsafe: [neon] types: - - [int64x2_t, 'static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] - - [uint64x2_t, 'static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [int64x2_t, 'static_assert_uimm_bits!(N, 1);', 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [uint64x2_t, 'static_assert_uimm_bits!(N, 1);', 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] compose: - Identifier: ["{type[1]}", Symbol] + - Identifier: ["{type[2]}", Symbol] - name: "vmla{neon_type[0].no}" doc: "Multiply-add to accumulator" @@ -9787,3 +9793,1975 @@ intrinsics: - "transmute" - - FnCall: ["vld4{neon_type[2].dup_nox}", [{FnCall: [transmute, [a]]}]] + - name: "vld1{type[0]}" + visibility: private + doc: "Load multiple single-element structures to one, two, three, or four registers" + arguments: ["a: {type[1]}", "b: {type[2]}"] + return_type: "{neon_type[3]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] + - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] + safety: + unsafe: [neon] + types: + - ["_v8i8", "*const i8", "i32", "int8x8_t"] + - ["q_v16i8", "*const i8", "i32", "int8x16_t"] + - ["_v4i16", "*const i8", "i32", "int16x4_t"] + - ["q_v8i16", "*const i8", "i32", "int16x8_t"] + - ["_v2i32", "*const i8", "i32", "int32x2_t"] + - ["q_v4i32", "*const i8", "i32", "int32x4_t"] + - ["_v1i64", "*const i8", "i32", "int64x1_t"] + - ["q_v2i64", "*const i8", "i32", "int64x2_t"] + - ["_v2f32", "*const i8", "i32", "float32x2_t"] + - ["q_v4f32", "*const i8", "i32", "float32x4_t"] + compose: + - LLVMLink: + name: "vld1.{type[0]}" + links: + - link: "llvm.arm.neon.vld1.{neon_type[3]}" + arch: arm + - FnCall: ["_vld1{type[0]}", [a, b]] + + - name: "vld1{neon_type[1].no}" + doc: "Load multiple single-element structures to one, two, three, or four registers." + arguments: ["ptr: {type[0]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vld1.{type[2]}"']]}]] + types: + - ['*const i8', int8x8_t, '8', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*const i8', int8x16_t, '8', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*const i16', int16x4_t, '16', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*const i16', int16x8_t, '16', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*const i32', int32x2_t, '32', 'crate::mem::align_of::() as i32', '_v2i32'] + - ['*const i32', int32x4_t, '32', 'crate::mem::align_of::() as i32', 'q_v4i32'] + - ['*const i64', int64x1_t, '64', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*const i64', int64x2_t, '64', 'crate::mem::align_of::() as i32', 'q_v2i64'] + compose: + - FnCall: + - "vld1{type[4]}" + - - 'ptr as *const i8' + - '{type[3]}' + + - name: "vld1{neon_type[1].no}" + doc: "Load multiple single-element structures to one, two, three, or four registers." + arguments: ["ptr: {type[0]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - FnCall: [target_feature, ['enable = "{type[3]}"']] + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vld1.{type[2]}"']]}]] + types: + - ['*const u8', uint8x8_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*const u8', uint8x16_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*const u16', uint16x4_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*const u16', uint16x8_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*const u32', uint32x2_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', '_v2i32'] + - ['*const u32', uint32x4_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v4i32'] + - ['*const u64', uint64x1_t, '64', 'neon,v7', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*const u64', uint64x2_t, '64', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*const p8', poly8x8_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*const p8', poly8x16_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*const p16', poly16x4_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*const p16', poly16x8_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*const p64', poly64x1_t, '64', 'neon,aes', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*const p64', poly64x2_t, '64', 'neon,aes', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*const f32', float32x2_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', '_v2f32'] + - ['*const f32', float32x4_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v4f32'] + compose: + - FnCall: + - transmute + - - FnCall: + - "vld1{type[5]}" + - - 'ptr as *const i8' + - '{type[4]}' + + - name: "vtbx1" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}"] + return_type: "{neon_type}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - "int8x8_t" + compose: + - LLVMLink: + name: "vtbx1" + links: + - link: "llvm.arm.neon.vtbx1" + arch: arm + + - name: "vtbx1_s8" + doc: "Extended table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}"] + return_type: "{neon_type}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - int8x8_t + compose: + - FnCall: [vtbx1, [a, b, c]] + + - name: "vtbx1{neon_type.no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: uint8x8_t"] + return_type: "{neon_type}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - uint8x8_t + - poly8x8_t + compose: + - FnCall: + - transmute + - - FnCall: + - vtbx1 + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + - FnCall: [transmute, [c]] + + - name: "vtbx2" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}", "d: {neon_type}"] + return_type: "{neon_type}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - "int8x8_t" + compose: + - LLVMLink: + name: "vtbx2" + links: + - link: "llvm.arm.neon.vtbx2" + arch: arm + + - name: "vtbx2_s8" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - [int8x8_t, int8x8x2_t] + compose: + - FnCall: [vtbx2, [a, 'b.0', 'b.1', c]] + + - name: "vtbx2{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - [uint8x8_t, uint8x8x2_t, uint8x8_t] + - [poly8x8_t, poly8x8x2_t, uint8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - vtbx2 + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ['b.0']] + - FnCall: [transmute, ['b.1']] + - FnCall: [transmute, [c]] + + + - name: "vtbx3" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}", "d: {neon_type}", "e: {neon_type}"] + return_type: "{neon_type}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - "int8x8_t" + compose: + - LLVMLink: + name: "vtbx3" + links: + - link: "llvm.arm.neon.vtbx3" + arch: arm + + - name: "vtbx3_s8" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - [int8x8_t, int8x8x3_t] + compose: + - FnCall: [vtbx3, [a, 'b.0', 'b.1', 'b.2', c]] + + - name: "vtbx3{neon_type[0].no}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - [uint8x8_t, uint8x8x3_t, uint8x8_t] + - [poly8x8_t, poly8x8x3_t, uint8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - vtbx3 + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ['b.0']] + - FnCall: [transmute, ['b.1']] + - FnCall: [transmute, ['b.2']] + - FnCall: [transmute, [c]] + + - name: "vtbx4" + visibility: private + doc: "Extended table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}", "d: {neon_type}", "e: {neon_type}", "f: {neon_type}"] + return_type: "{neon_type}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - "int8x8_t" + compose: + - LLVMLink: + name: "vtbx4" + links: + - link: "llvm.arm.neon.vtbx4" + arch: arm + + - name: "vtbx4{neon_type[0].noq}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - ["uint8x8_t", "uint8x8x4_t", "uint8x8_t"] + - ["poly8x8_t", "poly8x8x4_t", "uint8x8_t"] + compose: + - FnCall: + - "transmute" + - - FnCall: + - vtbx4 + - - FnCall: [transmute, [a]] + - FnCall: [transmute, ["b.0"]] + - FnCall: [transmute, ["b.1"]] + - FnCall: [transmute, ["b.2"]] + - FnCall: [transmute, ["b.3"]] + - FnCall: [transmute, [c]] + + - name: "vtbx4{neon_type[0].noq}" + doc: "Extended table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - *enable-v7 + - *target-is-arm + - *neon-unstable + assert_instr: [vtbx] + safety: + unsafe: [neon] + types: + - ["int8x8_t", "int8x8x4_t"] + big_endian_inverse: true + compose: + - FnCall: + - vtbx4 + - - a + - FnCall: [transmute, ["b.0"]] + - FnCall: [transmute, ["b.1"]] + - FnCall: [transmute, ["b.2"]] + - FnCall: [transmute, ["b.3"]] + - c + + - name: "vcombine{neon_type[0].noq}" + doc: "Vector combine" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - [float32x2_t, float32x4_t, '[0, 1, 2, 3]'] + - [poly8x8_t, poly8x16_t, '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] + - [poly16x4_t, poly16x8_t, '[0, 1, 2, 3, 4, 5, 6, 7]'] + - [int8x8_t, int8x16_t, '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] + - [int16x4_t, int16x8_t, '[0, 1, 2, 3, 4, 5, 6, 7]'] + - [int32x2_t, int32x4_t, '[0, 1, 2, 3]'] + - [int64x1_t, int64x2_t, '[0, 1]'] + - [uint8x8_t, uint8x16_t, '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] + - [uint16x4_t, uint16x8_t, '[0, 1, 2, 3, 4, 5, 6, 7]'] + - [uint32x2_t, uint32x4_t, '[0, 1, 2, 3]'] + - [uint64x1_t, uint64x2_t, '[0, 1]'] + - [poly64x1_t, poly64x2_t, '[0, 1]'] + compose: + - FnCall: [simd_shuffle!, [a, b, '{type[2]}']] + + - name: "vaeseq_u8" + doc: "AES single round encryption." + arguments: ["data: {neon_type}", "key: {neon_type}"] + return_type: "{neon_type}" + attr: + - FnCall: [target_feature, ['enable = "aes"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, [aese]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - uint8x16_t + compose: + - LLVMLink: + name: "vaeseq_u8" + links: + - link: "llvm.aarch64.crypto.aese" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.aese" + arch: arm + + - name: "vaesdq_u8" + doc: "AES single round encryption." + arguments: ["data: {neon_type}", "key: {neon_type}"] + return_type: "{neon_type}" + attr: + - FnCall: [target_feature, ['enable = "aes"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, [aesd]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - uint8x16_t + compose: + - LLVMLink: + name: "vaesdq_u8" + links: + - link: "llvm.aarch64.crypto.aesd" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.aesd" + arch: arm + + - name: "vaesmcq_u8" + doc: "AES mix columns." + arguments: ["data: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "aes"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint8x16_t, "aesmc"] + compose: + - LLVMLink: + name: "vaesmcq_u8" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vaesimcq_u8" + doc: "AES inverse mix columns." + arguments: ["data: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "aes"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint8x16_t, "aesimc"] + compose: + - LLVMLink: + name: "vaesimcq_u8" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha1h_u32" + doc: "SHA1 fixed rotate." + arguments: ["hash_e: {type[0]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [u32, "sha1h"] + compose: + - LLVMLink: + name: "vsha1h_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha1cq_u32" + doc: "SHA1 hash update accelerator, choose." + arguments: ["hash_abcd: {neon_type[2]}", "hash_e: {type[0]}", "wk: {neon_type[2]}"] + return_type: "{neon_type[2]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [u32, "sha1c", "uint32x4_t"] + compose: + - LLVMLink: + name: "vsha1cq_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha1mq_u32" + doc: "SHA1 hash update accelerator, majority" + arguments: ["hash_abcd: {neon_type[2]}", "hash_e: {type[0]}", "wk: {neon_type[2]}"] + return_type: "{neon_type[2]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [u32, "sha1m", "uint32x4_t"] + compose: + - LLVMLink: + name: "vsha1mq_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha1pq_u32" + doc: "SHA1 hash update accelerator, parity" + arguments: ["hash_abcd: {neon_type[2]}", "hash_e: {type[0]}", "wk: {neon_type[2]}"] + return_type: "{neon_type[2]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [u32, "sha1p", "uint32x4_t"] + compose: + - LLVMLink: + name: "vsha1pq_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha1su0q_u32" + doc: "SHA1 schedule update accelerator, first part." + arguments: ["w0_3: {neon_type[0]}", "w4_7: {neon_type[0]}", "w8_11: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint32x4_t, "sha1su0"] + compose: + - LLVMLink: + name: "vsha1su0q_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha1su1q_u32" + doc: "SHA1 schedule update accelerator, second part." + arguments: ["tw0_3: {neon_type[0]}", "w12_15: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint32x4_t, "sha1su1"] + compose: + - LLVMLink: + name: "vsha1su0q_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha256hq_u32" + doc: "SHA1 schedule update accelerator, first part." + arguments: ["hash_abcd: {neon_type[0]}", "hash_efgh: {neon_type[0]}", "wk: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint32x4_t, "sha256h"] + compose: + - LLVMLink: + name: "vsha256hq_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha256h2q_u32" + doc: "SHA1 schedule update accelerator, upper part." + arguments: ["hash_abcd: {neon_type[0]}", "hash_efgh: {neon_type[0]}", "wk: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint32x4_t, "sha256h2"] + compose: + - LLVMLink: + name: "vsha256h2q_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha256su0q_u32" + doc: "SHA256 schedule update accelerator, first part." + arguments: ["w0_3: {neon_type[0]}", "w4_7: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint32x4_t, "sha256su0"] + compose: + - LLVMLink: + name: "vsha256su0q_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "vsha256su1q_u32" + doc: "SHA256 schedule update accelerator, second part." + arguments: ["tw0_3: {neon_type[0]}", "w8_11: {neon_type[0]}", "w12_15: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - FnCall: [target_feature, ['enable = "sha2"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["{type[1]}"]] }]] + - *neon-unstable-is-arm + - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]] + safety: + unsafe: [neon] + types: + - [uint32x4_t, "sha256su1"] + compose: + - LLVMLink: + name: "vsha256su1q_u32" + links: + - link: "llvm.aarch64.crypto.{type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.{type[1]}" + arch: arm + + - name: "__crc32b" + doc: "CRC32 single round checksum for bytes (8 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32b"]] }]] + - *arm-crc-unstable + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - [u32, u8] + compose: + - LLVMLink: + name: "crc32b" + arguments: + - "crc: u32" + - "data: u32" + links: + - link: "llvm.aarch64.crc32b" + arch: aarch64,arm64ec + - link: "llvm.arm.crc32b" + arch: arm + - FnCall: ["___crc32b", ["crc.as_signed()", "data.as_signed() as i32"]] + + - name: "__crc32h" + doc: "CRC32 single round checksum for bytes (16 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32h"]] }]] + - *arm-crc-unstable + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - [u32, u16] + compose: + - LLVMLink: + name: "crc32h" + arguments: + - "crc: u32" + - "data: u32" + links: + - link: "llvm.aarch64.crc32h" + arch: aarch64,arm64ec + - link: "llvm.arm.crc32h" + arch: arm + - FnCall: ["___crc32h", ["crc.as_signed()", "data.as_signed() as i32"]] + + - name: "__crc32w" + doc: "CRC32 single round checksum for bytes (32 bits)." + arguments: ["crc: {type}", "data: {type}"] + return_type: "{type}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32w"]] }]] + - *arm-crc-unstable + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - u32 + compose: + - LLVMLink: + name: "crc32w" + links: + - link: "llvm.aarch64.crc32w" + arch: aarch64,arm64ec + - link: "llvm.arm.crc32w" + arch: arm + + - name: "__crc32cb" + doc: "CRC32-C single round checksum for bytes (8 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32cb"]] }]] + - *arm-crc-unstable + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - [u32, u8] + compose: + - LLVMLink: + name: "crc32cb" + arguments: + - "crc: u32" + - "data: u32" + links: + - link: "llvm.aarch64.crc32cb" + arch: aarch64,arm64ec + - link: "llvm.arm.crc32cb" + arch: arm + - FnCall: ["___crc32cb", ["crc.as_signed()", "data.as_signed() as i32"]] + + - name: "__crc32ch" + doc: "CRC32-C single round checksum for bytes (16 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32ch"]] }]] + - *arm-crc-unstable + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - [u32, u16] + compose: + - LLVMLink: + name: "crc32ch" + arguments: + - "crc: u32" + - "data: u32" + links: + - link: "llvm.aarch64.crc32ch" + arch: aarch64,arm64ec + - link: "llvm.arm.crc32ch" + arch: arm + - FnCall: ["___crc32ch", ["crc.as_signed()", "data.as_signed() as i32"]] + + - name: "__crc32cw" + doc: "CRC32-C single round checksum for bytes (32 bits)." + arguments: ["crc: {type}", "data: {type}"] + return_type: "{type}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - *neon-v8 + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32cw"]] }]] + - *arm-crc-unstable + - *aarch64-crc-stable + safety: + unsafe: [neon] + types: + - u32 + compose: + - LLVMLink: + name: "crc32cw" + links: + - link: "llvm.aarch64.crc32cw" + arch: aarch64,arm64ec + - link: "llvm.arm.crc32cw" + arch: arm + + - name: "__crc32d" + doc: "CRC32 single round checksum for quad words (64 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - FnCall: [cfg, ['target_arch = "arm"']] + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32w"]] }]] + - *arm-crc-unstable + safety: + unsafe: [neon] + types: + - [u32, u64] + compose: + - FnCall: + - __crc32w + - - FnCall: + - __crc32w + - - crc + - '(data & 0xFFFFFFFF) as u32' + - '(data >> 32) as u32' + + - name: "__crc32cd" + doc: "CRC32-C single round checksum for quad words (64 bits)." + arguments: ["crc: {type[0]}", "data: {type[1]}"] + return_type: "{type[0]}" + attr: + - FnCall: [target_feature, ['enable = "crc"']] + - FnCall: [cfg, ['target_arch = "arm"']] + - FnCall: [cfg_attr, [test, { FnCall: [assert_instr, ["crc32cw"]] }]] + - *arm-crc-unstable + safety: + unsafe: [neon] + types: + - [u32, u64] + compose: + - FnCall: + - __crc32cw + - - FnCall: + - __crc32cw + - - crc + - '(data & 0xFFFFFFFF) as u32' + - '(data >> 32) as u32' + + - name: "vabs{neon_type.no}" + doc: "Absolute value (wrapping)." + arguments: ["a: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vabs]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [abs]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - int8x8_t + - int16x4_t + - int32x2_t + - int8x16_t + - int16x8_t + - int32x4_t + compose: + - LLVMLink: + name: "vabs{neon_type.no}" + links: + - link: "llvm.aarch64.neon.abs.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vabs.{neon_type}" + arch: arm + + - name: "vpmin{neon_type.no}" + doc: "Folding minimum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmin]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sminp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - int8x8_t + - int16x4_t + - int32x2_t + compose: + - LLVMLink: + name: "vabs{neon_type.no}" + links: + - link: "llvm.aarch64.neon.sminp.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpmins.{neon_type}" + arch: arm + + - name: "vpmin{neon_type.no}" + doc: "Folding minimum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpminu]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uminp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - uint8x8_t + - uint16x4_t + - uint32x2_t + compose: + - LLVMLink: + name: "vabs{neon_type.no}" + links: + - link: "llvm.aarch64.neon.uminp.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpminu.{neon_type}" + arch: arm + + - name: "vpmin{neon_type.no}" + doc: "Folding minimum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmins]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fminp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - float32x2_t + compose: + - LLVMLink: + name: "vabs{neon_type.no}" + links: + - link: "llvm.aarch64.neon.fminp.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpmins.{neon_type}" + arch: arm + + + - name: "vpmax{neon_type.no}" + doc: "Folding maximum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmax]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smaxp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - int8x8_t + - int16x4_t + - int32x2_t + compose: + - LLVMLink: + name: "vabs{neon_type.no}" + links: + - link: "llvm.aarch64.neon.smaxp.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpmaxs.{neon_type}" + arch: arm + + - name: "vpmax{neon_type.no}" + doc: "Folding maximum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmaxu]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umaxp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - uint8x8_t + - uint16x4_t + - uint32x2_t + compose: + - LLVMLink: + name: "vabs{neon_type.no}" + links: + - link: "llvm.aarch64.neon.umaxp.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpmaxu.{neon_type}" + arch: arm + + - name: "vpmax{neon_type.no}" + doc: "Folding maximum of adjacent pairs" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmaxs]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmaxp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - float32x2_t + compose: + - LLVMLink: + name: "vabs{neon_type.no}" + links: + - link: "llvm.aarch64.neon.fmaxp.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpmaxs.{neon_type}" + arch: arm + + - name: "vraddhn{neon_type[0].noq}" + doc: "Rounding Add returning High Narrow." + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - [int16x8_t, int8x8_t, 'vraddhn.i16'] + - [int32x4_t, int16x4_t, 'vraddhn.i32'] + - [int64x2_t, int32x2_t, 'vraddhn.i64'] + compose: + - LLVMLink: + name: "vraddhn{neon_type[0].noq}" + links: + - link: "llvm.aarch64.neon.raddhn.{neon_type[0]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vraddhn.{neon_type[0]}" + arch: arm + + - name: "vraddhn{neon_type[0].noq}" + doc: "Rounding Add returning High Narrow." + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[1]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - [uint16x8_t, uint8x8_t, 'vraddhn.i16', int16x8_t] + - [uint32x4_t, uint16x4_t, 'vraddhn.i32', int32x4_t] + - [uint64x2_t, uint32x2_t, 'vraddhn.i64', int64x2_t] + compose: + - FnCall: + - transmute + - - FnCall: + - "vraddhn{neon_type[3].noq}" + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + + - name: "vraddhn_high{neon_type[1].noq}" + doc: "Rounding Add returning High Narrow (high half)." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[3]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn2]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - [uint8x8_t , uint16x8_t, uint8x16_t, 'vraddhn.i16', int16x8_t, '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] + - [uint16x4_t, uint32x4_t, uint16x8_t, 'vraddhn.i32', int32x4_t, '[0, 1, 2, 3, 4, 5, 6, 7]'] + - [uint32x2_t, uint64x2_t, uint32x4_t, 'vraddhn.i64', int64x2_t, '[0, 1, 2, 3]'] + compose: + - Let: + - x + - "{neon_type[0]}" + - FnCall: + - transmute + - - FnCall: + - "vraddhn{neon_type[4].noq}" + - - FnCall: [transmute, [b]] + - FnCall: [transmute, [c]] + - FnCall: ["simd_shuffle!", [a, x, '{type[5]}']] + + - name: "vraddhn_high{neon_type[1].noq}" + doc: "Rounding Add returning High Narrow (high half)." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] + return_type: "{neon_type[2]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[3]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [raddhn2]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - [int8x8_t , int16x8_t, int8x16_t, 'vraddhn.i16', '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'] + - [int16x4_t, int32x4_t, int16x8_t, 'vraddhn.i32', '[0, 1, 2, 3, 4, 5, 6, 7]'] + - [int32x2_t, int64x2_t, int32x4_t, 'vraddhn.i64', '[0, 1, 2, 3]'] + compose: + - Let: + - x + - FnCall: + - "vraddhn{neon_type[1].noq}" + - - b + - c + - FnCall: ["simd_shuffle!", [a, x, '{type[4]}']] + + - name: "vpadd{neon_type.no}" + doc: "Add pairwise." + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpadd]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [addp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - int8x8_t + - int16x4_t + - int32x2_t + compose: + - LLVMLink: + name: "vpadd{neon_type.no}" + links: + - link: "llvm.aarch64.neon.addp.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vaddp.{neon_type}" + arch: arm + + - name: "vpadd{neon_type[0].no}" + doc: "Add pairwise." + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpadd]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [addp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + safety: + unsafe: [neon] + types: + - [uint8x8_t, int8x8_t] + - [uint16x4_t, int16x4_t] + - [uint32x2_t, int32x2_t] + compose: + - FnCall: + - transmute + - - FnCall: + - "vpadd{neon_type[1].no}" + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + + # This was not publically exposed + - name: "priv_vpadal{neon_type[1].no}" + visibility: private + doc: "Signed Add and Accumulate Long Pairwise." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [sadalp]]}]] + - *neon-unstable-is-arm + types: + - [int16x4_t, int8x8_t] + - [int32x2_t, int16x4_t] + - [int64x1_t, int32x2_t] + - [int16x8_t, int8x16_t] + - [int32x4_t, int16x8_t] + - [int64x2_t, int32x4_t] + compose: + - LLVMLink: + name: "vpadal{neon_type[1].no}" + links: + - link: "llvm.arm.neon.vpadals.{neon_type[0]}.{neon_type[1]}" + arch: arm + + # This was not publically exposed + - name: "priv_vpadal{neon_type[1].no}" + visibility: private + doc: "Signed Add and Accumulate Long Pairwise." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [uadalp]]}]] + - *neon-unstable-is-arm + types: + - [uint16x4_t, uint8x8_t] + - [uint32x2_t, uint16x4_t] + - [uint64x1_t, uint32x2_t] + - [uint16x8_t, uint8x16_t] + - [uint32x4_t, uint16x8_t] + - [uint64x2_t, uint32x4_t] + compose: + - LLVMLink: + name: "vpadal{neon_type[1].no}" + links: + - link: "llvm.arm.neon.vpadalu.{neon_type[0]}.{neon_type[1]}" + arch: arm + + - name: "vpaddl{neon_type[0].no}" + doc: "Signed Add and Accumulate Long Pairwise." + arguments: ["a: {neon_type[0]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [saddlp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + types: + - [int8x8_t, int16x4_t , 'vpadal.s8'] + - [int16x4_t, int32x2_t, 'vpadal.s16'] + - [int32x2_t, int64x1_t, 'vpadal.s32'] + - [int8x16_t, int16x8_t, 'vpadal.s8'] + - [int16x8_t, int32x4_t, 'vpadal.s16'] + - [int32x4_t, int64x2_t, 'vpadal.s32'] + compose: + - LLVMLink: + name: "vpaddl{neon_type[1].no}" + links: + - link: "llvm.aarch64.neon.saddlp.{neon_type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpaddls.{neon_type[1]}.{neon_type[0]}" + arch: arm + + - name: "vpaddl{neon_type[0].no}" + doc: "Unsigned Add and Accumulate Long Pairwise." + arguments: ["a: {neon_type[0]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uaddlp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + types: + - [uint8x8_t, uint16x4_t , 'vpadal.u8'] + - [uint16x4_t, uint32x2_t, 'vpadal.u16'] + - [uint32x2_t, uint64x1_t, 'vpadal.u32'] + - [uint8x16_t, uint16x8_t, 'vpadal.u8'] + - [uint16x8_t, uint32x4_t, 'vpadal.u16'] + - [uint32x4_t, uint64x2_t, 'vpadal.u32'] + compose: + - LLVMLink: + name: "vpaddl{neon_type[1].no}" + links: + - link: "llvm.aarch64.neon.uaddlp.{neon_type[1]}.{neon_type[0]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.vpaddlu.{neon_type[1]}.{neon_type[0]}" + arch: arm + + - name: "vpadal{neon_type[1].no}" + doc: "Signed Add and Accumulate Long Pairwise." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [sadalp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + types: + - [int16x4_t, int8x8_t, 'vpadal.s8', 'let x: int16x4_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_s8(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddl_s8(b), a);}'] + - [int32x2_t, int16x4_t, 'vpadal.s16', 'let x: int32x2_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_s16(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddl_s16(b), a);}'] + - [int64x1_t, int32x2_t, 'vpadal.s32', 'let x: int64x1_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_s32(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddl_s32(b), a);}'] + - [int16x8_t, int8x16_t, 'vpadal.s8', 'let x: int16x8_t; #[cfg(target_arch = "arm")] { x = priv_vpadalq_s8(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddlq_s8(b), a);}'] + - [int32x4_t, int16x8_t, 'vpadal.s16', 'let x: int32x4_t; #[cfg(target_arch = "arm")] { x = priv_vpadalq_s16(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddlq_s16(b), a);}'] + - [int64x2_t, int32x4_t, 'vpadal.s32', 'let x: int64x2_t; #[cfg(target_arch = "arm")] { x = priv_vpadalq_s32(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddlq_s32(b), a);}'] + compose: + - Identifier: ['{type[3]}', Symbol] + - Identifier: [x, Symbol] + + - name: "vpadal{neon_type[1].no}" + doc: "Unsigned Add and Accumulate Long Pairwise." + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uadalp]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + types: + - [uint16x4_t, uint8x8_t, 'vpadal.u8', 'let x: uint16x4_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_u8(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddl_u8(b), a);}'] + - [uint32x2_t, uint16x4_t, 'vpadal.u16', 'let x: uint32x2_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_u16(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddl_u16(b), a);}'] + - [uint64x1_t, uint32x2_t, 'vpadal.u32', 'let x: uint64x1_t; #[cfg(target_arch = "arm")] { x = priv_vpadal_u32(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddl_u32(b), a);}'] + - [uint16x8_t, uint8x16_t, 'vpadal.u8', 'let x: uint16x8_t; #[cfg(target_arch = "arm")] { x = priv_vpadalq_u8(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddlq_u8(b), a);}'] + - [uint32x4_t, uint16x8_t, 'vpadal.u16', 'let x: uint32x4_t; #[cfg(target_arch = "arm")] { x = priv_vpadalq_u16(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddlq_u16(b), a);}'] + - [uint64x2_t, uint32x4_t, 'vpadal.u32', 'let x: uint64x2_t; #[cfg(target_arch = "arm")] { x = priv_vpadalq_u32(a, b); } #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] { x = simd_add(vpaddlq_u32(b), a);}'] + compose: + - Identifier: ['{type[3]}', Symbol] + - Identifier: [x, Symbol] + + - name: "vcnt{neon_type.no}" + doc: "Population count per byte." + arguments: ["a: {neon_type}"] + return_type: "{neon_type}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcnt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cnt]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + types: + - int8x8_t + - int8x16_t + compose: + - LLVMLink: + name: "vcnt{neon_type.no}" + links: + - link: "llvm.ctpop.{neon_type}" + arch: aarch64,arm64ec + - link: "llvm.ctpop.{neon_type}" + arch: arm + + - name: "vcnt{neon_type[0].no}" + doc: "Population count per byte." + arguments: ["a: {neon_type[0]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-v7 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vcnt]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [cnt]]}]] + - *neon-stable-not-arm + - *neon-unstable-is-arm + types: + - [uint8x8_t, int8x8_t] + - [uint8x16_t, int8x16_t] + - [poly8x8_t, int8x8_t] + - [poly8x16_t, int8x16_t] + compose: + - FnCall: + - transmute + - - FnCall: + - "vcnt{neon_type[1].no}" + - - FnCall: + - transmute + - - a + + - name: "vmmla{neon_type[0].no}" + doc: "8-bit integer matrix multiply-accumulate" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-i8mm + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [smmla]]}]] + - *neon-unstable-i8mm + - *neon-unstable-is-arm + types: + - [int32x4_t, int8x16_t] + compose: + - LLVMLink: + name: "vmmla{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.smmla.{neon_type[0]}.{neon_type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.smmla.{neon_type[0]}.{neon_type[1]}" + arch: arm + + - name: "vmmla{neon_type[0].no}" + doc: "8-bit integer matrix multiply-accumulate" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-i8mm + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ummla]]}]] + - *neon-unstable-i8mm + - *neon-unstable-is-arm + types: + - [uint32x4_t, uint8x16_t] + compose: + - LLVMLink: + name: "vmmla{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.ummla.{neon_type[0]}.{neon_type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.ummla.{neon_type[0]}.{neon_type[1]}" + arch: arm + + - name: "vusmmla{neon_type[0].no}" + doc: "Unsigned and signed 8-bit integer matrix multiply-accumulate" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}", "c: {neon_type[2]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *neon-i8mm + - *neon-v8 + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [usmmla]]}]] + - *neon-unstable-i8mm + - *neon-unstable-is-arm + types: + - [int32x4_t, uint8x16_t, int8x16_t] + compose: + - LLVMLink: + name: "vmmla{neon_type[0].no}" + links: + - link: "llvm.aarch64.neon.usmmla.{neon_type[0]}.{neon_type[1]}" + arch: aarch64,arm64ec + - link: "llvm.arm.neon.usmmla.{neon_type[0]}.{neon_type[1]}" + arch: arm + + - name: "vtbl1" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - int8x8_t + compose: + - LLVMLink: + name: "vtbl1" + links: + - link: "llvm.arm.neon.vtbl1" + arch: arm + + - name: "vtbl1_s8" + doc: "Table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}"] + return_type: "{neon_type}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - int8x8_t + compose: + - FnCall: [vtbl1, [a, b]] + + - name: "vtbl1{neon_type[0].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: uint8x8_t"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - [uint8x8_t, uint8x8_t] + - [poly8x8_t, poly8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - vtbl1 + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + + - name: "vtbl2" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}"] + return_type: "{neon_type}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - int8x8_t + compose: + - LLVMLink: + name: "vtbl2" + links: + - link: "llvm.arm.neon.vtbl2" + arch: arm + + - name: "vtbl2_s8" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - [int8x8x2_t, int8x8_t] + compose: + - FnCall: [vtbl2, ['a.0', 'a.1', b]] + + - name: "vtbl2{neon_type[1].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: uint8x8_t"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - [uint8x8x2_t, uint8x8_t] + - [poly8x8x2_t, poly8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - vtbl2 + - - FnCall: [transmute, ['a.0']] + - FnCall: [transmute, ['a.1']] + - FnCall: [transmute, [b]] + + - name: "vtbl3" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}", "d: {neon_type}"] + return_type: "{neon_type}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - int8x8_t + compose: + - LLVMLink: + name: "vtbl3" + links: + - link: "llvm.arm.neon.vtbl3" + arch: arm + + - name: "vtbl3_s8" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - [int8x8x3_t, int8x8_t] + compose: + - FnCall: [vtbl3, ['a.0', 'a.1', 'a.2', b]] + + - name: "vtbl3{neon_type[1].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: uint8x8_t"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - [uint8x8x3_t, uint8x8_t] + - [poly8x8x3_t, poly8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - vtbl3 + - - FnCall: [transmute, ['a.0']] + - FnCall: [transmute, ['a.1']] + - FnCall: [transmute, ['a.2']] + - FnCall: [transmute, [b]] + + - name: "vtbl4" + visibility: private + doc: "Table look-up" + arguments: ["a: {neon_type}", "b: {neon_type}", "c: {neon_type}", "d: {neon_type}", "e: {neon_type}"] + return_type: "{neon_type}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - int8x8_t + compose: + - LLVMLink: + name: "vtbl4" + links: + - link: "llvm.arm.neon.vtbl4" + arch: arm + + - name: "vtbl4_s8" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - [int8x8x4_t, int8x8_t] + compose: + - FnCall: [vtbl4, ['a.0', 'a.1', 'a.2', 'a.3', b]] + + - name: "vtbl4{neon_type[1].no}" + doc: "Table look-up" + arguments: ["a: {neon_type[0]}", "b: uint8x8_t"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + assert_instr: [vtbl] + types: + - [uint8x8x4_t, uint8x8_t] + - [poly8x8x4_t, poly8x8_t] + compose: + - FnCall: + - transmute + - - FnCall: + - vtbl4 + - - FnCall: [transmute, ['a.0']] + - FnCall: [transmute, ['a.1']] + - FnCall: [transmute, ['a.2']] + - FnCall: [transmute, ['a.3']] + - FnCall: [transmute, [b]] + + - name: "vst1{type[0]}" + visibility: private + doc: "Store multiple single-element structures from one, two, three, or four registers." + arguments: ["addr: {type[1]}", "val: {neon_type[2]}", "align: {type[3]}"] + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vst1.{type[4]}"']]}]] + types: + - ['_v8i8', '* const i8', int8x8_t, i32, '8'] + - ['q_v16i8', '* const i8', int8x16_t, i32, '8'] + - ['_v4i16', '* const i8', int16x4_t, i32, '16'] + - ['q_v8i16', '* const i8', int16x8_t, i32, '16'] + - ['_v2i32', '* const i8', int32x2_t, i32, '32'] + - ['q_v4i32', '* const i8', int32x4_t, i32, '32'] + - ['_v1i64', '* const i8', int64x1_t, i32, '64'] + - ['q_v2i64', '* const i8', int64x2_t, i32, '64'] + - ['_v2f32', '* const i8', float32x2_t, i32, '32'] + - ['q_v4f32', '* const i8', float32x4_t, i32, '32'] + compose: + - LLVMLink: + name: "_vst1{type[0]}" + links: + - link: "llvm.arm.neon.vst1.p0i8.{neon_type[2]}" + arch: arm + + - name: "vst1{neon_type[1].no}" + doc: "Store multiple single-element structures from one, two, three, or four registers." + arguments: ["ptr: {type[0]}", "a: {neon_type[1]}"] + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vst1.{type[2]}"']]}]] + types: + - ['*mut i8', int8x8_t, '8', 'a', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*mut i8', int8x16_t, '8', 'a', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*mut i16', int16x4_t, '16', 'a', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*mut i16', int16x8_t, '16', 'a', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*mut i32', int32x2_t, '32', 'a', 'crate::mem::align_of::() as i32', '_v2i32'] + - ['*mut i32', int32x4_t, '32', 'a', 'crate::mem::align_of::() as i32', 'q_v4i32'] + - ['*mut i64', int64x1_t, '64', 'a', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*mut i64', int64x2_t, '64', 'a', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*mut u8', uint8x8_t, '8', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*mut u8', uint8x16_t, '8', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*mut u16', uint16x4_t, '16', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*mut u16', uint16x8_t, '16', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*mut u32', uint32x2_t, '32', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v2i32'] + - ['*mut u32', uint32x4_t, '32', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v4i32'] + - ['*mut u64', uint64x1_t, '64', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*mut u64', uint64x2_t, '64', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*mut p8', poly8x8_t, '8', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*mut p8', poly8x16_t, '8', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*mut p16', poly16x4_t, '16', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*mut p16', poly16x8_t, '16', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*mut p64', poly64x1_t, '64', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*mut p64', poly64x2_t, '64', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*mut f32', float32x2_t, '32', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v2f32'] + - ['*mut f32', float32x4_t, '32', 'transmute(a)', 'crate::mem::align_of::() as i32', 'q_v4f32'] + compose: + - FnCall: + - "vst1{type[5]}" + - - 'ptr as *const i8' + - '{type[3]}' + - '{type[4]}' + + - name: "vshiftins{type[0]}" + visibility: private + doc: "Shift Right and Insert (immediate)" + arguments: ["a: {neon_type[1]}", "b: {neon_type[1]}", "c: {neon_type[1]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - *neon-v7 + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsri.{type[2]}"']]}]] + types: + - ['_v8i8', "int8x8_t", '8'] + - ['_v16i8', 'int8x16_t', '8'] + - ['_v4i16', 'int16x4_t', '16'] + - ['_v8i16', 'int16x8_t', '16'] + - ['_v2i32', 'int32x2_t', '32'] + - ['_v4i32', 'int32x4_t', '32'] + - ['_v1i64', 'int64x1_t', '64'] + - ['_v2i64', 'int64x2_t', '64'] + compose: + - LLVMLink: + name: "_vshiftins{type[0]}" + links: + - link: "llvm.arm.neon.vshiftins.{neon_type[1]}" + arch: arm + + - name: "vsri{neon_type[0].N}" + doc: "Shift Right and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - FnCall: [target_feature, ['enable = "{type[1]}"']] + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsri.{type[2]}"', 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + static_defs: ['const N: i32'] + types: + - [uint8x8_t, "neon,v7", '8', '1 <= N && N <= 8', 'v8i8', 'int8x8_t::splat', '-N as i8'] + - [uint8x16_t, "neon,v7", '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] + - [uint16x4_t, "neon,v7", '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] + - [uint16x8_t, "neon,v7", '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] + - [uint32x2_t, "neon,v7", '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N'] + - [uint32x4_t, "neon,v7", '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N'] + - [uint64x1_t, "neon,v7", '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] + - [uint64x2_t, "neon,v7", '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] + - [poly8x8_t, "neon,v7", '8', '1 <= N && N <= 8', 'v8i8', 'int8x8_t::splat', '-N as i8'] + - [poly8x16_t, "neon,v7", '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] + - [poly16x4_t, "neon,v7", '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] + - [poly16x8_t, "neon,v7", '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] + - [poly64x1_t, "neon,v7,aes", '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] + - [poly64x2_t, "neon,v7,aes", '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] + compose: + - FnCall: ["static_assert!", ['{type[3]}']] + - FnCall: + - 'transmute' + - - FnCall: + - "vshiftins_{type[4]}" + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + - FnCall: ["{type[5]}", ["{type[6]}"]] + + - name: "vsri{neon_type[0].N}" + doc: "Shift Right and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + static_defs: ['const N: i32'] + attr: + - FnCall: [target_feature, ['enable = "neon,v7"']] + - *target-is-arm + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsri.{type[1]}"', 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + safety: + unsafe: [neon] + types: + - [int8x8_t, '8', '1 <= N && N <= 8', 'v8i8', 'int8x8_t::splat', '-N as i8'] + - [int8x16_t, '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] + - [int16x4_t, '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] + - [int16x8_t, '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] + - [int32x2_t, '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N as i32'] + - [int32x4_t, '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N as i32'] + - [int64x1_t, '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] + - [int64x2_t, '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] + compose: + - FnCall: ["static_assert!", ['{type[2]}']] + - FnCall: + - "vshiftins_{type[3]}" + - - a + - b + - FnCall: ["{type[4]}", ["{type[5]}"]] + + - name: "vsli{neon_type[0].N}" + doc: "Shift Left and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - FnCall: [target_feature, ['enable = "{type[1]}"']] + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsli.{type[2]}"', 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + static_defs: ['const N: i32'] + types: + - [uint8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] + - [uint8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] + - [uint16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] + - [uint16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] + - [uint32x2_t, "neon,v7", '32', 'static_assert!', '0 >= N && N <= 31', 'v2i32', 'int32x2_t::splat', 'N as i32'] + - [uint32x4_t, "neon,v7", '32', 'static_assert!', '0 >= N && N <= 31', 'v4i32', 'int32x4_t::splat', 'N as i32'] + - [uint64x1_t, "neon,v7", '64', 'static_assert!', '0 >= N && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] + - [uint64x2_t, "neon,v7", '64', 'static_assert!', '0 >= N && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] + - [poly8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] + - [poly8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] + - [poly16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] + - [poly16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] + - [poly64x1_t, "neon,v7,aes", '64', 'static_assert!', '0 >= N && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] + - [poly64x2_t, "neon,v7,aes", '64', 'static_assert!', '0 >= N && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] + compose: + - FnCall: ["{type[3]}", ['{type[4]}']] + - FnCall: + - 'transmute' + - - FnCall: + - "vshiftins_{type[5]}" + - - FnCall: [transmute, [a]] + - FnCall: [transmute, [b]] + - FnCall: ["{type[6]}", ["{type[7]}"]] + + - name: "vsli{neon_type[0].N}" + doc: "Shift Left and Insert (immediate)" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" + safety: + unsafe: [neon] + attr: + - *target-is-arm + - FnCall: [target_feature, ['enable = "neon,v7"']] + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsli.{type[1]}"', 'N = 1']]}]] + - FnCall: [rustc_legacy_const_generics, ['2']] + static_defs: ['const N: i32'] + types: + - [int8x8_t, '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] + - [int8x16_t, '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] + - [int16x4_t, '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] + - [int16x8_t, '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] + - [int32x2_t, '32', 'static_assert!', '0 >= N && N <= 31', 'v2i32', 'int32x2_t::splat', 'N'] + - [int32x4_t, '32', 'static_assert!', '0 >= N && N <= 31', 'v4i32', 'int32x4_t::splat', 'N'] + - [int64x1_t, '64', 'static_assert!', '0 >= N && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] + - [int64x2_t, '64', 'static_assert!', '0 >= N && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] + compose: + - FnCall: ["{type[2]}", ['{type[3]}']] + - FnCall: + - "vshiftins_{type[4]}" + - - a + - b + - FnCall: ["{type[5]}", ["{type[6]}"]] From 11a00dd7229160a50e5d11183f200938c7069e9e Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Fri, 31 Jan 2025 11:16:13 +0000 Subject: [PATCH 03/13] Remove hand crafted intrinsics that are now generated --- crates/core_arch/src/aarch64/mod.rs | 3 - crates/core_arch/src/aarch64/neon/mod.rs | 4516 +---------- crates/core_arch/src/arm/mod.rs | 9 - crates/core_arch/src/arm/neon.rs | 1441 ---- crates/core_arch/src/arm_shared/crc.rs | 279 - crates/core_arch/src/arm_shared/crypto.rs | 544 -- crates/core_arch/src/arm_shared/mod.rs | 42 +- crates/core_arch/src/arm_shared/neon/mod.rs | 7938 +++++++------------ 8 files changed, 2941 insertions(+), 11831 deletions(-) delete mode 100644 crates/core_arch/src/arm_shared/crc.rs delete mode 100644 crates/core_arch/src/arm_shared/crypto.rs diff --git a/crates/core_arch/src/aarch64/mod.rs b/crates/core_arch/src/aarch64/mod.rs index 594c6d18c6..0defde52fd 100644 --- a/crates/core_arch/src/aarch64/mod.rs +++ b/crates/core_arch/src/aarch64/mod.rs @@ -10,10 +10,7 @@ mod mte; #[unstable(feature = "stdarch_aarch64_mte", issue = "129010")] pub use self::mte::*; -// NEON intrinsics are currently broken on big-endian, so don't expose them. (#1484) -#[cfg(target_endian = "little")] mod neon; -#[cfg(target_endian = "little")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::neon::*; diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index 1b82a90719..961f950215 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs @@ -14,8 +14,7 @@ use crate::{ core_arch::{arm_shared::*, simd::*}, hint::unreachable_unchecked, intrinsics::simd::*, - mem::{transmute, zeroed}, - ptr::{read_unaligned, write_unaligned}, + mem::transmute, }; #[cfg(test)] use stdarch_test::assert_instr; @@ -71,300 +70,6 @@ pub struct float64x2x4_t( pub float64x2_t, ); -#[allow(improper_ctypes)] -unsafe extern "unadjusted" { - // absolute value - #[link_name = "llvm.aarch64.neon.abs.i64"] - fn vabsd_s64_(a: i64) -> i64; - #[link_name = "llvm.aarch64.neon.abs.v1i64"] - fn vabs_s64_(a: int64x1_t) -> int64x1_t; - #[link_name = "llvm.aarch64.neon.abs.v2i64"] - fn vabsq_s64_(a: int64x2_t) -> int64x2_t; - - #[link_name = "llvm.aarch64.neon.suqadd.v8i8"] - fn vuqadd_s8_(a: int8x8_t, b: uint8x8_t) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.suqadd.v16i8"] - fn vuqaddq_s8_(a: int8x16_t, b: uint8x16_t) -> int8x16_t; - #[link_name = "llvm.aarch64.neon.suqadd.v4i16"] - fn vuqadd_s16_(a: int16x4_t, b: uint16x4_t) -> int16x4_t; - #[link_name = "llvm.aarch64.neon.suqadd.v8i16"] - fn vuqaddq_s16_(a: int16x8_t, b: uint16x8_t) -> int16x8_t; - #[link_name = "llvm.aarch64.neon.suqadd.v2i32"] - fn vuqadd_s32_(a: int32x2_t, b: uint32x2_t) -> int32x2_t; - #[link_name = "llvm.aarch64.neon.suqadd.v4i32"] - fn vuqaddq_s32_(a: int32x4_t, b: uint32x4_t) -> int32x4_t; - #[link_name = "llvm.aarch64.neon.suqadd.v1i64"] - fn vuqadd_s64_(a: int64x1_t, b: uint64x1_t) -> int64x1_t; - #[link_name = "llvm.aarch64.neon.suqadd.v2i64"] - fn vuqaddq_s64_(a: int64x2_t, b: uint64x2_t) -> int64x2_t; - - #[link_name = "llvm.aarch64.neon.usqadd.v8i8"] - fn vsqadd_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; - #[link_name = "llvm.aarch64.neon.usqadd.v16i8"] - fn vsqaddq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; - #[link_name = "llvm.aarch64.neon.usqadd.v4i16"] - fn vsqadd_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; - #[link_name = "llvm.aarch64.neon.usqadd.v8i16"] - fn vsqaddq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; - #[link_name = "llvm.aarch64.neon.usqadd.v2i32"] - fn vsqadd_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; - #[link_name = "llvm.aarch64.neon.usqadd.v4i32"] - fn vsqaddq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; - #[link_name = "llvm.aarch64.neon.usqadd.v1i64"] - fn vsqadd_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; - #[link_name = "llvm.aarch64.neon.usqadd.v2i64"] - fn vsqaddq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; - - #[link_name = "llvm.aarch64.neon.addp.v8i16"] - fn vpaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; - #[link_name = "llvm.aarch64.neon.addp.v4i32"] - fn vpaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; - #[link_name = "llvm.aarch64.neon.addp.v2i64"] - fn vpaddq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t; - #[link_name = "llvm.aarch64.neon.addp.v16i8"] - fn vpaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.saddv.i32.v4i16"] - fn vaddv_s16_(a: int16x4_t) -> i16; - #[link_name = "llvm.aarch64.neon.saddv.i32.v2i32"] - fn vaddv_s32_(a: int32x2_t) -> i32; - #[link_name = "llvm.aarch64.neon.saddv.i32.v8i8"] - fn vaddv_s8_(a: int8x8_t) -> i8; - #[link_name = "llvm.aarch64.neon.uaddv.i32.v4i16"] - fn vaddv_u16_(a: uint16x4_t) -> u16; - #[link_name = "llvm.aarch64.neon.uaddv.i32.v2i32"] - fn vaddv_u32_(a: uint32x2_t) -> u32; - #[link_name = "llvm.aarch64.neon.uaddv.i32.v8i8"] - fn vaddv_u8_(a: uint8x8_t) -> u8; - #[link_name = "llvm.aarch64.neon.saddv.i32.v8i16"] - fn vaddvq_s16_(a: int16x8_t) -> i16; - #[link_name = "llvm.aarch64.neon.saddv.i32.v4i32"] - fn vaddvq_s32_(a: int32x4_t) -> i32; - #[link_name = "llvm.aarch64.neon.saddv.i32.v16i8"] - fn vaddvq_s8_(a: int8x16_t) -> i8; - #[link_name = "llvm.aarch64.neon.uaddv.i32.v8i16"] - fn vaddvq_u16_(a: uint16x8_t) -> u16; - #[link_name = "llvm.aarch64.neon.uaddv.i32.v4i32"] - fn vaddvq_u32_(a: uint32x4_t) -> u32; - #[link_name = "llvm.aarch64.neon.uaddv.i32.v16i8"] - fn vaddvq_u8_(a: uint8x16_t) -> u8; - #[link_name = "llvm.aarch64.neon.saddv.i64.v2i64"] - fn vaddvq_s64_(a: int64x2_t) -> i64; - #[link_name = "llvm.aarch64.neon.uaddv.i64.v2i64"] - fn vaddvq_u64_(a: uint64x2_t) -> u64; - - #[link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"] - fn vaddlv_s8_(a: int8x8_t) -> i32; - #[link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"] - fn vaddlv_u8_(a: uint8x8_t) -> u32; - #[link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"] - fn vaddlvq_s8_(a: int8x16_t) -> i32; - #[link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"] - fn vaddlvq_u8_(a: uint8x16_t) -> u32; - - #[link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"] - fn vmaxv_s8_(a: int8x8_t) -> i8; - #[link_name = "llvm.aarch64.neon.smaxv.i8.v16i8"] - fn vmaxvq_s8_(a: int8x16_t) -> i8; - #[link_name = "llvm.aarch64.neon.smaxv.i16.v4i16"] - fn vmaxv_s16_(a: int16x4_t) -> i16; - #[link_name = "llvm.aarch64.neon.smaxv.i16.v8i16"] - fn vmaxvq_s16_(a: int16x8_t) -> i16; - #[link_name = "llvm.aarch64.neon.smaxv.i32.v2i32"] - fn vmaxv_s32_(a: int32x2_t) -> i32; - #[link_name = "llvm.aarch64.neon.smaxv.i32.v4i32"] - fn vmaxvq_s32_(a: int32x4_t) -> i32; - - #[link_name = "llvm.aarch64.neon.umaxv.i8.v8i8"] - fn vmaxv_u8_(a: uint8x8_t) -> u8; - #[link_name = "llvm.aarch64.neon.umaxv.i8.v16i8"] - fn vmaxvq_u8_(a: uint8x16_t) -> u8; - #[link_name = "llvm.aarch64.neon.umaxv.i16.v4i16"] - fn vmaxv_u16_(a: uint16x4_t) -> u16; - #[link_name = "llvm.aarch64.neon.umaxv.i16.v8i16"] - fn vmaxvq_u16_(a: uint16x8_t) -> u16; - #[link_name = "llvm.aarch64.neon.umaxv.i32.v2i32"] - fn vmaxv_u32_(a: uint32x2_t) -> u32; - #[link_name = "llvm.aarch64.neon.umaxv.i32.v4i32"] - fn vmaxvq_u32_(a: uint32x4_t) -> u32; - - #[link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"] - fn vmaxv_f32_(a: float32x2_t) -> f32; - #[link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"] - fn vmaxvq_f32_(a: float32x4_t) -> f32; - #[link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"] - fn vmaxvq_f64_(a: float64x2_t) -> f64; - - #[link_name = "llvm.aarch64.neon.sminv.i8.v8i8"] - fn vminv_s8_(a: int8x8_t) -> i8; - #[link_name = "llvm.aarch64.neon.sminv.i8.v16i8"] - fn vminvq_s8_(a: int8x16_t) -> i8; - #[link_name = "llvm.aarch64.neon.sminv.i16.v4i16"] - fn vminv_s16_(a: int16x4_t) -> i16; - #[link_name = "llvm.aarch64.neon.sminv.i16.v8i16"] - fn vminvq_s16_(a: int16x8_t) -> i16; - #[link_name = "llvm.aarch64.neon.sminv.i32.v2i32"] - fn vminv_s32_(a: int32x2_t) -> i32; - #[link_name = "llvm.aarch64.neon.sminv.i32.v4i32"] - fn vminvq_s32_(a: int32x4_t) -> i32; - - #[link_name = "llvm.aarch64.neon.uminv.i8.v8i8"] - fn vminv_u8_(a: uint8x8_t) -> u8; - #[link_name = "llvm.aarch64.neon.uminv.i8.v16i8"] - fn vminvq_u8_(a: uint8x16_t) -> u8; - #[link_name = "llvm.aarch64.neon.uminv.i16.v4i16"] - fn vminv_u16_(a: uint16x4_t) -> u16; - #[link_name = "llvm.aarch64.neon.uminv.i16.v8i16"] - fn vminvq_u16_(a: uint16x8_t) -> u16; - #[link_name = "llvm.aarch64.neon.uminv.i32.v2i32"] - fn vminv_u32_(a: uint32x2_t) -> u32; - #[link_name = "llvm.aarch64.neon.uminv.i32.v4i32"] - fn vminvq_u32_(a: uint32x4_t) -> u32; - - #[link_name = "llvm.aarch64.neon.fminv.f32.v2f32"] - fn vminv_f32_(a: float32x2_t) -> f32; - #[link_name = "llvm.aarch64.neon.fminv.f32.v4f32"] - fn vminvq_f32_(a: float32x4_t) -> f32; - #[link_name = "llvm.aarch64.neon.fminv.f64.v2f64"] - fn vminvq_f64_(a: float64x2_t) -> f64; - - #[link_name = "llvm.aarch64.neon.sminp.v16i8"] - fn vpminq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; - #[link_name = "llvm.aarch64.neon.sminp.v8i16"] - fn vpminq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; - #[link_name = "llvm.aarch64.neon.sminp.v4i32"] - fn vpminq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; - #[link_name = "llvm.aarch64.neon.uminp.v16i8"] - fn vpminq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - #[link_name = "llvm.aarch64.neon.uminp.v8i16"] - fn vpminq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - #[link_name = "llvm.aarch64.neon.uminp.v4i32"] - fn vpminq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - #[link_name = "llvm.aarch64.neon.fminp.4f32"] - fn vpminq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; - #[link_name = "llvm.aarch64.neon.fminp.v2f64"] - fn vpminq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t; - - #[link_name = "llvm.aarch64.neon.smaxp.v16i8"] - fn vpmaxq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; - #[link_name = "llvm.aarch64.neon.smaxp.v8i16"] - fn vpmaxq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; - #[link_name = "llvm.aarch64.neon.smaxp.v4i32"] - fn vpmaxq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; - #[link_name = "llvm.aarch64.neon.umaxp.v16i8"] - fn vpmaxq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - #[link_name = "llvm.aarch64.neon.umaxp.v8i16"] - fn vpmaxq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - #[link_name = "llvm.aarch64.neon.umaxp.v4i32"] - fn vpmaxq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - #[link_name = "llvm.aarch64.neon.fmaxp.4f32"] - fn vpmaxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; - #[link_name = "llvm.aarch64.neon.fmaxp.v2f64"] - fn vpmaxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t; - - #[link_name = "llvm.aarch64.neon.tbl1.v8i8"] - fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.tbl1.v16i8"] - fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.tbx1.v8i8"] - fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.tbx1.v16i8"] - fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.tbl2.v8i8"] - fn vqtbl2(a0: int8x16_t, a1: int8x16_t, b: uint8x8_t) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.tbl2.v16i8"] - fn vqtbl2q(a0: int8x16_t, a1: int8x16_t, b: uint8x16_t) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.tbx2.v8i8"] - fn vqtbx2(a: int8x8_t, b0: int8x16_t, b1: int8x16_t, c: uint8x8_t) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.tbx2.v16i8"] - fn vqtbx2q(a: int8x16_t, b0: int8x16_t, b1: int8x16_t, c: uint8x16_t) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.tbl3.v8i8"] - fn vqtbl3(a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, b: uint8x8_t) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.tbl3.v16i8"] - fn vqtbl3q(a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, b: uint8x16_t) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.tbx3.v8i8"] - fn vqtbx3(a: int8x8_t, b0: int8x16_t, b1: int8x16_t, b2: int8x16_t, c: uint8x8_t) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.tbx3.v16i8"] - fn vqtbx3q( - a: int8x16_t, - b0: int8x16_t, - b1: int8x16_t, - b2: int8x16_t, - c: uint8x16_t, - ) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.tbl4.v8i8"] - fn vqtbl4(a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, a3: int8x16_t, b: uint8x8_t) - -> int8x8_t; - #[link_name = "llvm.aarch64.neon.tbl4.v16i8"] - fn vqtbl4q( - a0: int8x16_t, - a1: int8x16_t, - a2: int8x16_t, - a3: int8x16_t, - b: uint8x16_t, - ) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.tbx4.v8i8"] - fn vqtbx4( - a: int8x8_t, - b0: int8x16_t, - b1: int8x16_t, - b2: int8x16_t, - b3: int8x16_t, - c: uint8x8_t, - ) -> int8x8_t; - - #[link_name = "llvm.aarch64.neon.tbx4.v16i8"] - fn vqtbx4q( - a: int8x16_t, - b0: int8x16_t, - b1: int8x16_t, - b2: int8x16_t, - b3: int8x16_t, - c: uint8x16_t, - ) -> int8x16_t; - - #[link_name = "llvm.aarch64.neon.vsli.v8i8"] - fn vsli_n_s8_(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.vsli.v16i8"] - fn vsliq_n_s8_(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; - #[link_name = "llvm.aarch64.neon.vsli.v4i16"] - fn vsli_n_s16_(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; - #[link_name = "llvm.aarch64.neon.vsli.v8i16"] - fn vsliq_n_s16_(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; - #[link_name = "llvm.aarch64.neon.vsli.v2i32"] - fn vsli_n_s32_(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; - #[link_name = "llvm.aarch64.neon.vsli.v4i32"] - fn vsliq_n_s32_(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; - #[link_name = "llvm.aarch64.neon.vsli.v1i64"] - fn vsli_n_s64_(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; - #[link_name = "llvm.aarch64.neon.vsli.v2i64"] - fn vsliq_n_s64_(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; - - #[link_name = "llvm.aarch64.neon.vsri.v8i8"] - fn vsri_n_s8_(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; - #[link_name = "llvm.aarch64.neon.vsri.v16i8"] - fn vsriq_n_s8_(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; - #[link_name = "llvm.aarch64.neon.vsri.v4i16"] - fn vsri_n_s16_(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; - #[link_name = "llvm.aarch64.neon.vsri.v8i16"] - fn vsriq_n_s16_(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; - #[link_name = "llvm.aarch64.neon.vsri.v2i32"] - fn vsri_n_s32_(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; - #[link_name = "llvm.aarch64.neon.vsri.v4i32"] - fn vsriq_n_s32_(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; - #[link_name = "llvm.aarch64.neon.vsri.v1i64"] - fn vsri_n_s64_(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; - #[link_name = "llvm.aarch64.neon.vsri.v2i64"] - fn vsriq_n_s64_(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; -} - /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] @@ -485,3710 +190,445 @@ pub unsafe fn vcopy_laneq_f64( transmute::(simd_extract!(b, LANE2 as u32)) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Load multiple single-element structures to one, two, three, or four registers #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { - read_unaligned(ptr.cast()) +pub unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t { + vld1_f64(ptr) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Load multiple single-element structures to one, two, three, or four registers #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(ld1r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { - read_unaligned(ptr.cast()) +pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t { + let x = vld1q_lane_f64::<0>(ptr, transmute(f64x2::splat(0.))); + simd_shuffle!(x, x, [0, 0]) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(ldr, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { - read_unaligned(ptr.cast()) +pub unsafe fn vld1_lane_f64(ptr: *const f64, src: float64x1_t) -> float64x1_t { + static_assert!(LANE == 0); + simd_insert!(src, LANE as u32, *ptr) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(ld1, LANE = 1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { - read_unaligned(ptr.cast()) +pub unsafe fn vld1q_lane_f64(ptr: *const f64, src: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Bitwise Select instructions. This instruction sets each bit in the destination SIMD&FP register +/// to the corresponding bit from the first source SIMD&FP register when the original +/// destination bit was 1, otherwise from the second source SIMD&FP register. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(bsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { - read_unaligned(ptr.cast()) +pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + let not = int64x1_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } - -/// Load multiple single-element structures to one, two, three, or four registers. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(bsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { - read_unaligned(ptr.cast()) +pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t { + let not = int64x1_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } - -/// Load multiple single-element structures to one, two, three, or four registers. +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(bsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { - read_unaligned(ptr.cast()) +pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + let not = int64x2_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } - -/// Load multiple single-element structures to one, two, three, or four registers. +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(bsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { - read_unaligned(ptr.cast()) +pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t { + let not = int64x2_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Vector add. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(fadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { - read_unaligned(ptr.cast()) +pub unsafe fn vadd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + simd_add(a, b) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Vector add. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(fadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { - read_unaligned(ptr.cast()) +pub unsafe fn vaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_add(a, b) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Vector add. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(add))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { - read_unaligned(ptr.cast()) +pub unsafe fn vadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_add(a, b) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Vector add. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(add))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { - read_unaligned(ptr.cast()) +pub unsafe fn vadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_add(a, b) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Vector add. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(add))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { - read_unaligned(ptr.cast()) +pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { + a.wrapping_add(b) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Vector add. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(add))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { - read_unaligned(ptr.cast()) +pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { + a.wrapping_add(b) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Extract vector from pair of vectors #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { - read_unaligned(ptr.cast()) +pub unsafe fn vext_p64(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t { + static_assert!(N == 0); + a } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Extract vector from pair of vectors #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { - read_unaligned(ptr.cast()) +pub unsafe fn vext_f64(a: float64x1_t, _b: float64x1_t) -> float64x1_t { + static_assert!(N == 0); + a } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(fmov))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { - read_unaligned(ptr.cast()) +pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t { + transmute(u64x1::new(value)) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { - read_unaligned(ptr.cast()) +pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t { + float64x1_t::splat(value) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(dup))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { - read_unaligned(ptr.cast()) +pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t { + transmute(u64x2::new(value, value)) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { - read_unaligned(ptr.cast()) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { - read_unaligned(ptr.cast()) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(dup))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { - read_unaligned(ptr.cast()) +pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t { + float64x2_t::splat(value) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(fmov))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - read_unaligned(ptr.cast()) +pub unsafe fn vmov_n_p64(value: p64) -> poly64x1_t { + vdup_n_p64(value) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { - read_unaligned(ptr.cast()) +pub unsafe fn vmov_n_f64(value: f64) -> float64x1_t { + vdup_n_f64(value) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(dup))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { - read_unaligned(ptr.cast()) +pub unsafe fn vmovq_n_p64(value: p64) -> poly64x2_t { + vdupq_n_p64(value) } -/// Load multiple single-element structures to one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(test, assert_instr(dup))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { - read_unaligned(ptr.cast()) +pub unsafe fn vmovq_n_f64(value: f64) -> float64x2_t { + vdupq_n_f64(value) } -/// Load multiple single-element structures to one, two, three, or four registers +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(mov))] +#[cfg_attr(all(test, target_env = "msvc"), assert_instr(dup))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t { - vld1_f64(ptr) +pub unsafe fn vget_high_f64(a: float64x2_t) -> float64x1_t { + float64x1_t([simd_extract!(a, 1)]) } -/// Load multiple single-element structures to one, two, three, or four registers +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld1r))] +#[cfg_attr(test, assert_instr(ext))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t { - let x = vld1q_lane_f64::<0>(ptr, transmute(f64x2::splat(0.))); - simd_shuffle!(x, x, [0, 0]) +pub unsafe fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { + transmute(u64x1::new(simd_extract!(a, 1))) } -/// Load one single-element structure to one lane of one register. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(ldr, LANE = 0))] +#[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_lane_f64(ptr: *const f64, src: float64x1_t) -> float64x1_t { - static_assert!(LANE == 0); - simd_insert!(src, LANE as u32, *ptr) +pub unsafe fn vget_low_f64(a: float64x2_t) -> float64x1_t { + float64x1_t([simd_extract!(a, 0)]) } -/// Load one single-element structure to one lane of one register. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(ld1, LANE = 1))] +#[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_lane_f64(ptr: *const f64, src: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) +pub unsafe fn vget_low_p64(a: poly64x2_t) -> poly64x1_t { + transmute(u64x1::new(simd_extract!(a, 0))) } -/// Store multiple single-element structures from one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { - write_unaligned(ptr.cast(), a); +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, IMM5 = 0) +)] +pub unsafe fn vget_lane_f64(v: float64x1_t) -> f64 { + static_assert!(IMM5 == 0); + simd_extract!(v, IMM5 as u32) } -/// Store multiple single-element structures from one, two, three, or four registers. +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { - write_unaligned(ptr.cast(), a); +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, IMM5 = 0) +)] +pub unsafe fn vgetq_lane_f64(v: float64x2_t) -> f64 { + static_assert_uimm_bits!(IMM5, 1); + simd_extract!(v, IMM5 as u32) } -/// Store multiple single-element structures from one, two, three, or four registers. +/// Vector combine #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[cfg_attr(test, assert_instr(mov))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { - write_unaligned(ptr.cast(), a); +pub unsafe fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t { + simd_shuffle!(low, high, [0, 1]) } -/// Store multiple single-element structures from one, two, three, or four registers. +/// Shift left #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { - write_unaligned(ptr.cast(), a); +pub unsafe fn vshld_n_s64(a: i64) -> i64 { + static_assert_uimm_bits!(N, 6); + a << N } -/// Store multiple single-element structures from one, two, three, or four registers. +/// Shift left #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { - write_unaligned(ptr.cast(), a); +pub unsafe fn vshld_n_u64(a: u64) -> u64 { + static_assert_uimm_bits!(N, 6); + a << N } -/// Store multiple single-element structures from one, two, three, or four registers. +/// Signed shift right #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { - write_unaligned(ptr.cast(), a); -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { - write_unaligned(ptr.cast(), a); -} - -// Store multiple single-element structures from one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { - write_unaligned(ptr.cast(), a); -} - -// Store multiple single-element structures from one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { - write_unaligned(ptr.cast(), a); -} - -// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { - write_unaligned(ptr.cast(), a); -} - -// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { - write_unaligned(ptr.cast(), a); -} - -// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { - write_unaligned(ptr.cast(), a); -} - -// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { - write_unaligned(ptr.cast(), a); -} - -/// Absolute Value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(abs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vabsd_s64(a: i64) -> i64 { - vabsd_s64_(a) -} -/// Absolute Value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(abs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t { - vabs_s64_(a) -} -/// Absolute Value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(abs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { - vabsq_s64_(a) -} - -/// Bitwise Select instructions. This instruction sets each bit in the destination SIMD&FP register -/// to the corresponding bit from the first source SIMD&FP register when the original -/// destination bit was 1, otherwise from the second source SIMD&FP register. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(bsl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - let not = int64x1_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) -} -/// Bitwise Select. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(bsl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t { - let not = int64x1_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) -} -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(bsl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - let not = int64x2_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) -} -/// Bitwise Select. (128-bit) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(bsl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t { - let not = int64x2_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) -} - -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { - vuqadd_s8_(a, b) -} -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - vuqaddq_s8_(a, b) -} -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { - vuqadd_s16_(a, b) -} -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { - vuqaddq_s16_(a, b) -} -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { - vuqadd_s32_(a, b) -} -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { - vuqaddq_s32_(a, b) -} -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { - vuqadd_s64_(a, b) -} -/// Signed saturating Accumulate of Unsigned value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { - vuqaddq_s64_(a, b) -} - -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - vsqadd_u8_(a, b) -} -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - vsqaddq_u8_(a, b) -} -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - vsqadd_u16_(a, b) -} -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - vsqaddq_u16_(a, b) -} -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - vsqadd_u32_(a, b) -} -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - vsqaddq_u32_(a, b) -} -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - vsqadd_u64_(a, b) -} -/// Unsigned saturating Accumulate of Signed value. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - vsqaddq_u64_(a, b) -} - -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - vpaddq_s16_(a, b) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - transmute(vpaddq_s16_(transmute(a), transmute(b))) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - vpaddq_s32_(a, b) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - transmute(vpaddq_s32_(transmute(a), transmute(b))) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - vpaddq_s64_(a, b) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - transmute(vpaddq_s64_(transmute(a), transmute(b))) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - vpaddq_s8_(a, b) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - transmute(vpaddq_s8_(transmute(a), transmute(b))) +pub unsafe fn vshrd_n_s64(a: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { 63 } else { N }; + a >> n } -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { - transmute(vaddvq_u64_(transmute(a))) -} -/// Add pairwise -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { - vaddvq_u64_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { - vaddv_s16_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { - vaddv_s32_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { - vaddv_s8_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { - vaddv_u16_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { - vaddv_u32_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { - vaddv_u8_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { - vaddvq_s16_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { - vaddvq_s32_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { - vaddvq_s8_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { - vaddvq_u16_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { - vaddvq_u32_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { - vaddvq_u8_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { - vaddvq_s64_(a) -} - -/// Add across vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(addp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { - vaddvq_u64_(a) -} - -/// Signed Add Long across Vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(saddlv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { - vaddlv_s8_(a) as i16 -} - -/// Signed Add Long across Vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(saddlv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { - vaddlvq_s8_(a) as i16 -} - -/// Unsigned Add Long across Vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uaddlv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { - vaddlv_u8_(a) as u16 -} - -/// Unsigned Add Long across Vector -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uaddlv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { - vaddlvq_u8_(a) as u16 -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vadd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - simd_add(a, b) -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_add(a, b) -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(add))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_add(a, b) -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(add))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_add(a, b) -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(add))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { - a.wrapping_add(b) -} - -/// Vector add. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(add))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { - a.wrapping_add(b) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { - vmaxv_s8_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { - vmaxvq_s8_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { - vmaxv_s16_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { - vmaxvq_s16_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { - vmaxv_s32_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { - vmaxvq_s32_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { - vmaxv_u8_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { - vmaxvq_u8_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { - vmaxv_u16_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { - vmaxvq_u16_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { - vmaxv_u32_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { - vmaxvq_u32_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { - vmaxv_f32_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { - vmaxvq_f32_(a) -} - -/// Horizontal vector max. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { - vmaxvq_f64_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { - vminv_s8_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { - vminvq_s8_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { - vminv_s16_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { - vminvq_s16_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { - vminv_s32_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { - vminvq_s32_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { - vminv_u8_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { - vminvq_u8_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { - vminv_u16_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { - vminvq_u16_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { - vminv_u32_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { - vminvq_u32_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { - vminv_f32_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { - vminvq_f32_(a) -} - -/// Horizontal vector min. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { - vminvq_f64_(a) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - vpminq_s8_(a, b) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - vpminq_s16_(a, b) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - vpminq_s32_(a, b) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - vpminq_u8_(a, b) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - vpminq_u16_(a, b) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - vpminq_u32_(a, b) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - vpminq_f32_(a, b) -} - -/// Folding minimum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - vpminq_f64_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - vpmaxq_s8_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - vpmaxq_s16_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - vpmaxq_s32_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - vpmaxq_u8_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - vpmaxq_u16_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - vpmaxq_u32_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - vpmaxq_f32_(a, b) -} - -/// Folding maximum of adjacent pairs -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - vpmaxq_f64_(a, b) -} - -/// Extract vector from pair of vectors -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vext_p64(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t { - static_assert!(N == 0); - a -} - -/// Extract vector from pair of vectors -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vext_f64(a: float64x1_t, _b: float64x1_t) -> float64x1_t { - static_assert!(N == 0); - a -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t { - transmute(u64x1::new(value)) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t { - float64x1_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t { - transmute(u64x2::new(value, value)) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t { - float64x2_t::splat(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmov_n_p64(value: p64) -> poly64x1_t { - vdup_n_p64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmov_n_f64(value: f64) -> float64x1_t { - vdup_n_f64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmovq_n_p64(value: p64) -> poly64x2_t { - vdupq_n_p64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmovq_n_f64(value: f64) -> float64x2_t { - vdupq_n_f64(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(mov))] -#[cfg_attr(all(test, target_env = "msvc"), assert_instr(dup))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vget_high_f64(a: float64x2_t) -> float64x1_t { - float64x1_t([simd_extract!(a, 1)]) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ext))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { - transmute(u64x1::new(simd_extract!(a, 1))) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vget_low_f64(a: float64x2_t) -> float64x1_t { - float64x1_t([simd_extract!(a, 0)]) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vget_low_p64(a: poly64x2_t) -> poly64x1_t { - transmute(u64x1::new(simd_extract!(a, 0))) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, IMM5 = 0) -)] -pub unsafe fn vget_lane_f64(v: float64x1_t) -> f64 { - static_assert!(IMM5 == 0); - simd_extract!(v, IMM5 as u32) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, IMM5 = 0) -)] -pub unsafe fn vgetq_lane_f64(v: float64x2_t) -> f64 { - static_assert_uimm_bits!(IMM5, 1); - simd_extract!(v, IMM5 as u32) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t { - simd_shuffle!(low, high, [0, 1]) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vqtbl1_s8(vcombine_s8(a, zeroed()), transmute(b)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - vqtbl1_u8(vcombine_u8(a, zeroed()), b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - vqtbl1_p8(vcombine_p8(a, zeroed()), b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - vqtbl1_s8(vcombine_s8(a.0, a.1), transmute(b)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - vqtbl1_u8(vcombine_u8(a.0, a.1), b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - vqtbl1_p8(vcombine_p8(a.0, a.1), b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - vqtbl2_s8( - int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, zeroed())), - transmute(b), - ) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - vqtbl2_u8( - uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, zeroed())), - b, - ) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - vqtbl2_p8( - poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, zeroed())), - b, - ) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { - vqtbl2_s8( - int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)), - transmute(b), - ) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - vqtbl2_u8( - uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)), - b, - ) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - vqtbl2_p8( - poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)), - b, - ) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - let r = vqtbx1_s8(a, vcombine_s8(b, zeroed()), transmute(c)); - let m: int8x8_t = simd_lt(c, transmute(i8x8::splat(8))); - simd_select(m, r, a) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let r = vqtbx1_u8(a, vcombine_u8(b, zeroed()), c); - let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(8))); - simd_select(m, r, a) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - let r = vqtbx1_p8(a, vcombine_p8(b, zeroed()), c); - let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(8))); - simd_select(m, r, a) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { - vqtbx1_s8(a, vcombine_s8(b.0, b.1), transmute(c)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - vqtbx1_u8(a, vcombine_u8(b.0, b.1), c) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { - vqtbx1_p8(a, vcombine_p8(b.0, b.1), c) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { - let r = vqtbx2_s8( - a, - int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, zeroed())), - transmute(c), - ); - let m: int8x8_t = simd_lt(c, transmute(i8x8::splat(24))); - simd_select(m, r, a) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - let r = vqtbx2_u8( - a, - uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, zeroed())), - c, - ); - let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(24))); - simd_select(m, r, a) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - let r = vqtbx2_p8( - a, - poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, zeroed())), - c, - ); - let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(24))); - simd_select(m, r, a) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - vqtbx2_s8( - a, - int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, b.3)), - transmute(c), - ) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { - vqtbx2_u8( - a, - uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, b.3)), - c, - ) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - vqtbx2_p8( - a, - poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, b.3)), - c, - ) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_s8(t: int8x16_t, idx: uint8x8_t) -> int8x8_t { - vqtbl1(t, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_s8(t: int8x16_t, idx: uint8x16_t) -> int8x16_t { - vqtbl1q(t, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_u8(t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbl1(transmute(t), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_u8(t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbl1q(transmute(t), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_p8(t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbl1(transmute(t), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_p8(t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbl1q(transmute(t), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_s8(a: int8x8_t, t: int8x16_t, idx: uint8x8_t) -> int8x8_t { - vqtbx1(a, t, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_s8(a: int8x16_t, t: int8x16_t, idx: uint8x16_t) -> int8x16_t { - vqtbx1q(a, t, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_u8(a: uint8x8_t, t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbx1(transmute(a), transmute(t), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_u8(a: uint8x16_t, t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbx1q(transmute(a), transmute(t), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_p8(a: poly8x8_t, t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbx1(transmute(a), transmute(t), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_p8(a: poly8x16_t, t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbx1q(transmute(a), transmute(t), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_s8(t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t { - vqtbl2(t.0, t.1, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_s8(t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t { - vqtbl2q(t.0, t.1, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_u8(t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbl2(transmute(t.0), transmute(t.1), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_u8(t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbl2q(transmute(t.0), transmute(t.1), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_p8(t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbl2(transmute(t.0), transmute(t.1), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_p8(t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbl2q(transmute(t.0), transmute(t.1), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_s8(a: int8x8_t, t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t { - vqtbx2(a, t.0, t.1, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_s8(a: int8x16_t, t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t { - vqtbx2q(a, t.0, t.1, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_u8(a: uint8x8_t, t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbx2(transmute(a), transmute(t.0), transmute(t.1), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_u8(a: uint8x16_t, t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbx2q(transmute(a), transmute(t.0), transmute(t.1), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_p8(a: poly8x8_t, t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbx2(transmute(a), transmute(t.0), transmute(t.1), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_p8(a: poly8x16_t, t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbx2q(transmute(a), transmute(t.0), transmute(t.1), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_s8(t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t { - vqtbl3(t.0, t.1, t.2, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_s8(t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t { - vqtbl3q(t.0, t.1, t.2, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_u8(t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbl3(transmute(t.0), transmute(t.1), transmute(t.2), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_u8(t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbl3q(transmute(t.0), transmute(t.1), transmute(t.2), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_p8(t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbl3(transmute(t.0), transmute(t.1), transmute(t.2), idx)) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_p8(t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbl3q(transmute(t.0), transmute(t.1), transmute(t.2), idx)) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_s8(a: int8x8_t, t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t { - vqtbx3(a, t.0, t.1, t.2, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_s8(a: int8x16_t, t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t { - vqtbx3q(a, t.0, t.1, t.2, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_u8(a: uint8x8_t, t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbx3( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - idx, - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_u8(a: uint8x16_t, t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbx3q( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - idx, - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_p8(a: poly8x8_t, t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbx3( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - idx, - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_p8(a: poly8x16_t, t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbx3q( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - idx, - )) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_s8(t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t { - vqtbl4(t.0, t.1, t.2, t.3, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_s8(t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t { - vqtbl4q(t.0, t.1, t.2, t.3, idx) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_u8(t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbl4( - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_u8(t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbl4q( - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_p8(t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbl4( - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_p8(t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbl4q( - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_s8(a: int8x8_t, t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t { - vqtbx4(a, t.0, t.1, t.2, t.3, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_s8(a: int8x16_t, t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t { - vqtbx4q(a, t.0, t.1, t.2, t.3, idx) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_u8(a: uint8x8_t, t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t { - transmute(vqtbx4( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_u8(a: uint8x16_t, t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t { - transmute(vqtbx4q( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_p8(a: poly8x8_t, t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t { - transmute(vqtbx4( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_p8(a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t { - transmute(vqtbx4q( - transmute(a), - transmute(t.0), - transmute(t.1), - transmute(t.2), - transmute(t.3), - idx, - )) -} - -/// Shift left -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshld_n_s64(a: i64) -> i64 { - static_assert_uimm_bits!(N, 6); - a << N -} - -/// Shift left -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshld_n_u64(a: u64) -> u64 { - static_assert_uimm_bits!(N, 6); - a << N -} - -/// Signed shift right -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrd_n_s64(a: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { 63 } else { N }; - a >> n -} - -/// Unsigned shift right -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrd_n_u64(a: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { - return 0; - } else { - N - }; - a >> n -} - -/// Signed shift right and accumulate -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsrad_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - a.wrapping_add(vshrd_n_s64::(b)) -} - -/// Unsigned shift right and accumulate -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsrad_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - a.wrapping_add(vshrd_n_u64::(b)) -} - -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - vsli_n_s8_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - vsliq_n_s8_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - vsli_n_s16_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - vsliq_n_s16_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); - vsli_n_s32_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); - vsliq_n_s32_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 0 && N <= 63); - vsli_n_s64_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 63); - vsliq_n_s64_(a, b, N) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - transmute(vsli_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - transmute(vsli_n_s16_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 0 && N <= 31); - transmute(vsli_n_s32_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 31); - transmute(vsliq_n_s32_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_s64_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsliq_n_s64_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - transmute(vsli_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 4); - transmute(vsli_n_s16_(transmute(a), transmute(b), N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 4); - transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) -} - -/// Shift Left and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_s64_(transmute(a), transmute(b), N)) -} - -/// Shift Left and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsliq_n_s64_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - vsri_n_s8_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - vsriq_n_s8_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - vsri_n_s16_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - vsriq_n_s16_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - vsri_n_s32_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - vsriq_n_s32_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - vsri_n_s64_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - vsriq_n_s64_(a, b, N) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsri_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsriq_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsri_n_s16_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsriq_n_s16_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - transmute(vsri_n_s32_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - transmute(vsriq_n_s32_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_s64_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsriq_n_s64_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsri_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsriq_n_s8_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsri_n_s16_(transmute(a), transmute(b), N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsriq_n_s16_(transmute(a), transmute(b), N)) -} - -/// Shift Right and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_s64_(transmute(a), transmute(b), N)) -} - -/// Shift Right and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsriq_n_s64_(transmute(a), transmute(b), N)) -} - -/// SM3TT1A -#[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt1aq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1a" - )] - fn vsm3tt1aq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t; - } - vsm3tt1aq_u32_(a, b, c, IMM2 as i64) -} - -/// SM3TT1B -#[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt1bq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1b" - )] - fn vsm3tt1bq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t; - } - vsm3tt1bq_u32_(a, b, c, IMM2 as i64) -} - -/// SM3TT2A -#[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt2aq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt2a" - )] - fn vsm3tt2aq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t; - } - vsm3tt2aq_u32_(a, b, c, IMM2 as i64) -} - -/// SM3TT2B -#[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt2bq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt2b" - )] - fn vsm3tt2bq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t; - } - vsm3tt2bq_u32_(a, b, c, IMM2 as i64) -} - -/// Exclusive OR and rotate -#[inline] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(IMM6, 6); - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.xar" - )] - fn vxarq_u64_(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t; - } - vxarq_u64_(a, b, IMM6 as i64) -} - -#[cfg(test)] -mod tests { - use crate::core_arch::aarch64::test_support::*; - use crate::core_arch::arm_shared::test_support::*; - use crate::core_arch::{aarch64::neon::*, aarch64::*, simd::*}; - use std::mem::transmute; - use stdarch_test::simd_test; - - #[simd_test(enable = "neon")] - unsafe fn test_vuqadd_s8() { - let a = i8x8::new(i8::MIN, -3, -2, -1, 0, 1, 2, i8::MAX); - let b = u8x8::new(u8::MAX, 1, 2, 3, 4, 5, 6, 7); - let e = i8x8::new(i8::MAX, -2, 0, 2, 4, 6, 8, i8::MAX); - let r: i8x8 = transmute(vuqadd_s8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vuqaddq_s8() { - let a = i8x16::new( - i8::MIN, - -7, - -6, - -5, - -4, - -3, - -2, - -1, - 0, - 1, - 2, - 3, - 4, - 5, - 6, - i8::MAX, - ); - let b = u8x16::new(u8::MAX, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let e = i8x16::new( - i8::MAX, - -6, - -4, - -2, - 0, - 2, - 4, - 6, - 8, - 10, - 12, - 14, - 16, - 18, - 20, - i8::MAX, - ); - let r: i8x16 = transmute(vuqaddq_s8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vuqadd_s16() { - let a = i16x4::new(i16::MIN, -1, 0, i16::MAX); - let b = u16x4::new(u16::MAX, 1, 2, 3); - let e = i16x4::new(i16::MAX, 0, 2, i16::MAX); - let r: i16x4 = transmute(vuqadd_s16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vuqaddq_s16() { - let a = i16x8::new(i16::MIN, -3, -2, -1, 0, 1, 2, i16::MAX); - let b = u16x8::new(u16::MAX, 1, 2, 3, 4, 5, 6, 7); - let e = i16x8::new(i16::MAX, -2, 0, 2, 4, 6, 8, i16::MAX); - let r: i16x8 = transmute(vuqaddq_s16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vuqadd_s32() { - let a = i32x2::new(i32::MIN, i32::MAX); - let b = u32x2::new(u32::MAX, 1); - let e = i32x2::new(i32::MAX, i32::MAX); - let r: i32x2 = transmute(vuqadd_s32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vuqaddq_s32() { - let a = i32x4::new(i32::MIN, -1, 0, i32::MAX); - let b = u32x4::new(u32::MAX, 1, 2, 3); - let e = i32x4::new(i32::MAX, 0, 2, i32::MAX); - let r: i32x4 = transmute(vuqaddq_s32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vuqadd_s64() { - let a = i64x1::new(i64::MIN); - let b = u64x1::new(u64::MAX); - let e = i64x1::new(i64::MAX); - let r: i64x1 = transmute(vuqadd_s64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vuqaddq_s64() { - let a = i64x2::new(i64::MIN, i64::MAX); - let b = u64x2::new(u64::MAX, 1); - let e = i64x2::new(i64::MAX, i64::MAX); - let r: i64x2 = transmute(vuqaddq_s64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vsqadd_u8() { - let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, u8::MAX); - let b = i8x8::new(i8::MIN, -3, -2, -1, 0, 1, 2, 3); - let e = u8x8::new(0, 0, 0, 2, 4, 6, 8, u8::MAX); - let r: u8x8 = transmute(vsqadd_u8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsqaddq_u8() { - let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, u8::MAX); - let b = i8x16::new(i8::MIN, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7); - let e = u8x16::new(0, 0, 0, 0, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, u8::MAX); - let r: u8x16 = transmute(vsqaddq_u8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsqadd_u16() { - let a = u16x4::new(0, 1, 2, u16::MAX); - let b = i16x4::new(i16::MIN, -1, 0, 1); - let e = u16x4::new(0, 0, 2, u16::MAX); - let r: u16x4 = transmute(vsqadd_u16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsqaddq_u16() { - let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, u16::MAX); - let b = i16x8::new(i16::MIN, -3, -2, -1, 0, 1, 2, 3); - let e = u16x8::new(0, 0, 0, 2, 4, 6, 8, u16::MAX); - let r: u16x8 = transmute(vsqaddq_u16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsqadd_u32() { - let a = u32x2::new(0, u32::MAX); - let b = i32x2::new(i32::MIN, 1); - let e = u32x2::new(0, u32::MAX); - let r: u32x2 = transmute(vsqadd_u32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsqaddq_u32() { - let a = u32x4::new(0, 1, 2, u32::MAX); - let b = i32x4::new(i32::MIN, -1, 0, 1); - let e = u32x4::new(0, 0, 2, u32::MAX); - let r: u32x4 = transmute(vsqaddq_u32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsqadd_u64() { - let a = u64x1::new(0); - let b = i64x1::new(i64::MIN); - let e = u64x1::new(0); - let r: u64x1 = transmute(vsqadd_u64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsqaddq_u64() { - let a = u64x2::new(0, u64::MAX); - let b = i64x2::new(i64::MIN, 1); - let e = u64x2::new(0, u64::MAX); - let r: u64x2 = transmute(vsqaddq_u64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_s16() { - let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let b = i16x8::new(0, -1, -2, -3, -4, -5, -6, -7); - let r: i16x8 = transmute(vpaddq_s16(transmute(a), transmute(b))); - let e = i16x8::new(3, 7, 11, 15, -1, -5, -9, -13); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_s32() { - let a = i32x4::new(1, 2, 3, 4); - let b = i32x4::new(0, -1, -2, -3); - let r: i32x4 = transmute(vpaddq_s32(transmute(a), transmute(b))); - let e = i32x4::new(3, 7, -1, -5); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_s64() { - let a = i64x2::new(1, 2); - let b = i64x2::new(0, -1); - let r: i64x2 = transmute(vpaddq_s64(transmute(a), transmute(b))); - let e = i64x2::new(3, -1); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_s8() { - let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let b = i8x16::new( - 0, -1, -2, -3, -4, -5, -6, -7, -8, -8, -10, -11, -12, -13, -14, -15, - ); - let r: i8x16 = transmute(vpaddq_s8(transmute(a), transmute(b))); - let e = i8x16::new( - 3, 7, 11, 15, 19, 23, 27, 31, -1, -5, -9, -13, -16, -21, -25, -29, - ); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_u16() { - let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let b = u16x8::new(17, 18, 19, 20, 20, 21, 22, 23); - let r: u16x8 = transmute(vpaddq_u16(transmute(a), transmute(b))); - let e = u16x8::new(1, 5, 9, 13, 35, 39, 41, 45); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_u32() { - let a = u32x4::new(0, 1, 2, 3); - let b = u32x4::new(17, 18, 19, 20); - let r: u32x4 = transmute(vpaddq_u32(transmute(a), transmute(b))); - let e = u32x4::new(1, 5, 35, 39); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_u64() { - let a = u64x2::new(0, 1); - let b = u64x2::new(17, 18); - let r: u64x2 = transmute(vpaddq_u64(transmute(a), transmute(b))); - let e = u64x2::new(1, 35); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddq_u8() { - let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let b = i8x16::new( - 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 26, 27, 29, 29, 30, 31, - ); - let r = i8x16::new(1, 5, 9, 13, 17, 21, 25, 29, 35, 39, 41, 45, 49, 53, 58, 61); - let e: i8x16 = transmute(vpaddq_u8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddd_s64() { - let a = i64x2::new(2, -3); - let r: i64 = vpaddd_s64(transmute(a)); - let e = -1_i64; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpaddd_u64() { - let a = i64x2::new(2, 3); - let r: u64 = vpaddd_u64(transmute(a)); - let e = 5_u64; - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vadd_f64() { - let a = 1.; - let b = 8.; - let e = 9.; - let r: f64 = transmute(vadd_f64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_f64() { - let a = f64x2::new(1., 2.); - let b = f64x2::new(8., 7.); - let e = f64x2::new(9., 9.); - let r: f64x2 = transmute(vaddq_f64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vadd_s64() { - let a = 1_i64; - let b = 8_i64; - let e = 9_i64; - let r: i64 = transmute(vadd_s64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vadd_u64() { - let a = 1_u64; - let b = 8_u64; - let e = 9_u64; - let r: u64 = transmute(vadd_u64(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddd_s64() { - let a = 1_i64; - let b = 8_i64; - let e = 9_i64; - let r: i64 = vaddd_s64(a, b); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddd_u64() { - let a = 1_u64; - let b = 8_u64; - let e = 9_u64; - let r: u64 = vaddd_u64(a, b); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxv_s8() { - let r = vmaxv_s8(transmute(i8x8::new(1, 2, 3, 4, -8, 6, 7, 5))); - assert_eq!(r, 7_i8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_s8() { - #[rustfmt::skip] - let r = vmaxvq_s8(transmute(i8x16::new( - 1, 2, 3, 4, - -16, 6, 7, 5, - 8, 1, 1, 1, - 1, 1, 1, 1, - ))); - assert_eq!(r, 8_i8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxv_s16() { - let r = vmaxv_s16(transmute(i16x4::new(1, 2, -4, 3))); - assert_eq!(r, 3_i16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_s16() { - let r = vmaxvq_s16(transmute(i16x8::new(1, 2, 7, 4, -16, 6, 7, 5))); - assert_eq!(r, 7_i16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxv_s32() { - let r = vmaxv_s32(transmute(i32x2::new(1, -4))); - assert_eq!(r, 1_i32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_s32() { - let r = vmaxvq_s32(transmute(i32x4::new(1, 2, -32, 4))); - assert_eq!(r, 4_i32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxv_u8() { - let r = vmaxv_u8(transmute(u8x8::new(1, 2, 3, 4, 8, 6, 7, 5))); - assert_eq!(r, 8_u8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_u8() { - #[rustfmt::skip] - let r = vmaxvq_u8(transmute(u8x16::new( - 1, 2, 3, 4, - 16, 6, 7, 5, - 8, 1, 1, 1, - 1, 1, 1, 1, - ))); - assert_eq!(r, 16_u8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxv_u16() { - let r = vmaxv_u16(transmute(u16x4::new(1, 2, 4, 3))); - assert_eq!(r, 4_u16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_u16() { - let r = vmaxvq_u16(transmute(u16x8::new(1, 2, 7, 4, 16, 6, 7, 5))); - assert_eq!(r, 16_u16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxv_u32() { - let r = vmaxv_u32(transmute(u32x2::new(1, 4))); - assert_eq!(r, 4_u32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_u32() { - let r = vmaxvq_u32(transmute(u32x4::new(1, 2, 32, 4))); - assert_eq!(r, 32_u32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxv_f32() { - let r = vmaxv_f32(transmute(f32x2::new(1., 4.))); - assert_eq!(r, 4_f32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_f32() { - let r = vmaxvq_f32(transmute(f32x4::new(1., 2., 32., 4.))); - assert_eq!(r, 32_f32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmaxvq_f64() { - let r = vmaxvq_f64(transmute(f64x2::new(1., 4.))); - assert_eq!(r, 4_f64); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminv_s8() { - let r = vminv_s8(transmute(i8x8::new(1, 2, 3, 4, -8, 6, 7, 5))); - assert_eq!(r, -8_i8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_s8() { - #[rustfmt::skip] - let r = vminvq_s8(transmute(i8x16::new( - 1, 2, 3, 4, - -16, 6, 7, 5, - 8, 1, 1, 1, - 1, 1, 1, 1, - ))); - assert_eq!(r, -16_i8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminv_s16() { - let r = vminv_s16(transmute(i16x4::new(1, 2, -4, 3))); - assert_eq!(r, -4_i16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_s16() { - let r = vminvq_s16(transmute(i16x8::new(1, 2, 7, 4, -16, 6, 7, 5))); - assert_eq!(r, -16_i16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminv_s32() { - let r = vminv_s32(transmute(i32x2::new(1, -4))); - assert_eq!(r, -4_i32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_s32() { - let r = vminvq_s32(transmute(i32x4::new(1, 2, -32, 4))); - assert_eq!(r, -32_i32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminv_u8() { - let r = vminv_u8(transmute(u8x8::new(1, 2, 3, 4, 8, 6, 7, 5))); - assert_eq!(r, 1_u8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_u8() { - #[rustfmt::skip] - let r = vminvq_u8(transmute(u8x16::new( - 1, 2, 3, 4, - 16, 6, 7, 5, - 8, 1, 1, 1, - 1, 1, 1, 1, - ))); - assert_eq!(r, 1_u8); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminv_u16() { - let r = vminv_u16(transmute(u16x4::new(1, 2, 4, 3))); - assert_eq!(r, 1_u16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_u16() { - let r = vminvq_u16(transmute(u16x8::new(1, 2, 7, 4, 16, 6, 7, 5))); - assert_eq!(r, 1_u16); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminv_u32() { - let r = vminv_u32(transmute(u32x2::new(1, 4))); - assert_eq!(r, 1_u32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_u32() { - let r = vminvq_u32(transmute(u32x4::new(1, 2, 32, 4))); - assert_eq!(r, 1_u32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminv_f32() { - let r = vminv_f32(transmute(f32x2::new(1., 4.))); - assert_eq!(r, 1_f32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_f32() { - let r = vminvq_f32(transmute(f32x4::new(1., 2., 32., 4.))); - assert_eq!(r, 1_f32); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vminvq_f64() { - let r = vminvq_f64(transmute(f64x2::new(1., 4.))); - assert_eq!(r, 1_f64); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpminq_s8() { - #[rustfmt::skip] - let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8); - #[rustfmt::skip] - let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9); - #[rustfmt::skip] - let e = i8x16::new(-2, -4, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6); - let r: i8x16 = transmute(vpminq_s8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpminq_s16() { - let a = i16x8::new(1, -2, 3, 4, 5, 6, 7, 8); - let b = i16x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = i16x8::new(-2, 3, 5, 7, 0, 2, 4, 6); - let r: i16x8 = transmute(vpminq_s16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpminq_s32() { - let a = i32x4::new(1, -2, 3, 4); - let b = i32x4::new(0, 3, 2, 5); - let e = i32x4::new(-2, 3, 0, 2); - let r: i32x4 = transmute(vpminq_s32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpminq_u8() { - #[rustfmt::skip] - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8); - #[rustfmt::skip] - let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9); - #[rustfmt::skip] - let e = u8x16::new(1, 3, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6); - let r: u8x16 = transmute(vpminq_u8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpminq_u16() { - let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let b = u16x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = u16x8::new(1, 3, 5, 7, 0, 2, 4, 6); - let r: u16x8 = transmute(vpminq_u16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpminq_u32() { - let a = u32x4::new(1, 2, 3, 4); - let b = u32x4::new(0, 3, 2, 5); - let e = u32x4::new(1, 3, 0, 2); - let r: u32x4 = transmute(vpminq_u32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_f32() { - let a = f32x4::new(1., -2., 3., 4.); - let b = f32x4::new(0., 3., 2., 5.); - let e = f32x4::new(-2., 3., 0., 2.); - let r: f32x4 = transmute(vpminq_f32(transmute(a), transmute(b))); - assert_eq!(r, e); - } +/// Unsigned shift right +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vshrd_n_u64(a: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { + return 0; + } else { + N + }; + a >> n +} - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_f64() { - let a = f64x2::new(1., -2.); - let b = f64x2::new(0., 3.); - let e = f64x2::new(-2., 0.); - let r: f64x2 = transmute(vpminq_f64(transmute(a), transmute(b))); - assert_eq!(r, e); - } +/// Signed shift right and accumulate +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vsrad_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + a.wrapping_add(vshrd_n_s64::(b)) +} - #[simd_test(enable = "neon")] - unsafe fn test_vpmaxq_s8() { - #[rustfmt::skip] - let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8); - #[rustfmt::skip] - let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9); - #[rustfmt::skip] - let e = i8x16::new(1, 3, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9); - let r: i8x16 = transmute(vpmaxq_s8(transmute(a), transmute(b))); - assert_eq!(r, e); - } +/// Unsigned shift right and accumulate +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vsrad_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + a.wrapping_add(vshrd_n_u64::(b)) +} - #[simd_test(enable = "neon")] - unsafe fn test_vpmaxq_s16() { - let a = i16x8::new(1, -2, 3, 4, 5, 6, 7, 8); - let b = i16x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = i16x8::new(1, 4, 6, 8, 3, 5, 7, 9); - let r: i16x8 = transmute(vpmaxq_s16(transmute(a), transmute(b))); - assert_eq!(r, e); - } +#[cfg(test)] +mod tests { + use crate::core_arch::aarch64::test_support::*; + use crate::core_arch::arm_shared::test_support::*; + use crate::core_arch::{aarch64::neon::*, aarch64::*, simd::*}; + use std::mem::transmute; + use stdarch_test::simd_test; #[simd_test(enable = "neon")] - unsafe fn test_vpmaxq_s32() { - let a = i32x4::new(1, -2, 3, 4); - let b = i32x4::new(0, 3, 2, 5); - let e = i32x4::new(1, 4, 3, 5); - let r: i32x4 = transmute(vpmaxq_s32(transmute(a), transmute(b))); + unsafe fn test_vadd_f64() { + let a = 1.; + let b = 8.; + let e = 9.; + let r: f64 = transmute(vadd_f64(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpmaxq_u8() { - #[rustfmt::skip] - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8); - #[rustfmt::skip] - let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9); - #[rustfmt::skip] - let e = u8x16::new(2, 4, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9); - let r: u8x16 = transmute(vpmaxq_u8(transmute(a), transmute(b))); + unsafe fn test_vaddq_f64() { + let a = f64x2::new(1., 2.); + let b = f64x2::new(8., 7.); + let e = f64x2::new(9., 9.); + let r: f64x2 = transmute(vaddq_f64(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpmaxq_u16() { - let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let b = u16x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = u16x8::new(2, 4, 6, 8, 3, 5, 7, 9); - let r: u16x8 = transmute(vpmaxq_u16(transmute(a), transmute(b))); + unsafe fn test_vadd_s64() { + let a = 1_i64; + let b = 8_i64; + let e = 9_i64; + let r: i64 = transmute(vadd_s64(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpmaxq_u32() { - let a = u32x4::new(1, 2, 3, 4); - let b = u32x4::new(0, 3, 2, 5); - let e = u32x4::new(2, 4, 3, 5); - let r: u32x4 = transmute(vpmaxq_u32(transmute(a), transmute(b))); + unsafe fn test_vadd_u64() { + let a = 1_u64; + let b = 8_u64; + let e = 9_u64; + let r: u64 = transmute(vadd_u64(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpmax_f32() { - let a = f32x4::new(1., -2., 3., 4.); - let b = f32x4::new(0., 3., 2., 5.); - let e = f32x4::new(1., 4., 3., 5.); - let r: f32x4 = transmute(vpmaxq_f32(transmute(a), transmute(b))); + unsafe fn test_vaddd_s64() { + let a = 1_i64; + let b = 8_i64; + let e = 9_i64; + let r: i64 = vaddd_s64(a, b); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpmax_f64() { - let a = f64x2::new(1., -2.); - let b = f64x2::new(0., 3.); - let e = f64x2::new(1., 3.); - let r: f64x2 = transmute(vpmaxq_f64(transmute(a), transmute(b))); + unsafe fn test_vaddd_u64() { + let a = 1_u64; + let b = 8_u64; + let e = 9_u64; + let r: u64 = vaddd_u64(a, b); assert_eq!(r, e); } @@ -4444,291 +884,6 @@ mod tests { assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vceq_u64() { - test_cmp_u64( - |i, j| vceq_u64(i, j), - |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vceqq_u64() { - testq_cmp_u64( - |i, j| vceqq_u64(i, j), - |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vceq_s64() { - test_cmp_s64( - |i, j| vceq_s64(i, j), - |a: i64, b: i64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vceqq_s64() { - testq_cmp_s64( - |i, j| vceqq_s64(i, j), - |a: i64, b: i64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vceq_p64() { - test_cmp_p64( - |i, j| vceq_p64(i, j), - |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vceqq_p64() { - testq_cmp_p64( - |i, j| vceqq_p64(i, j), - |a: u64, b: u64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vceq_f64() { - test_cmp_f64( - |i, j| vceq_f64(i, j), - |a: f64, b: f64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vceqq_f64() { - testq_cmp_f64( - |i, j| vceqq_f64(i, j), - |a: f64, b: f64| -> u64 { if a == b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcgt_s64() { - test_cmp_s64( - |i, j| vcgt_s64(i, j), - |a: i64, b: i64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcgtq_s64() { - testq_cmp_s64( - |i, j| vcgtq_s64(i, j), - |a: i64, b: i64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcgt_u64() { - test_cmp_u64( - |i, j| vcgt_u64(i, j), - |a: u64, b: u64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcgtq_u64() { - testq_cmp_u64( - |i, j| vcgtq_u64(i, j), - |a: u64, b: u64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcgt_f64() { - test_cmp_f64( - |i, j| vcgt_f64(i, j), - |a: f64, b: f64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcgtq_f64() { - testq_cmp_f64( - |i, j| vcgtq_f64(i, j), - |a: f64, b: f64| -> u64 { if a > b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vclt_s64() { - test_cmp_s64( - |i, j| vclt_s64(i, j), - |a: i64, b: i64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcltq_s64() { - testq_cmp_s64( - |i, j| vcltq_s64(i, j), - |a: i64, b: i64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vclt_u64() { - test_cmp_u64( - |i, j| vclt_u64(i, j), - |a: u64, b: u64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcltq_u64() { - testq_cmp_u64( - |i, j| vcltq_u64(i, j), - |a: u64, b: u64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vltq_f64() { - test_cmp_f64( - |i, j| vclt_f64(i, j), - |a: f64, b: f64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcltq_f64() { - testq_cmp_f64( - |i, j| vcltq_f64(i, j), - |a: f64, b: f64| -> u64 { if a < b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcle_s64() { - test_cmp_s64( - |i, j| vcle_s64(i, j), - |a: i64, b: i64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcleq_s64() { - testq_cmp_s64( - |i, j| vcleq_s64(i, j), - |a: i64, b: i64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcle_u64() { - test_cmp_u64( - |i, j| vcle_u64(i, j), - |a: u64, b: u64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcleq_u64() { - testq_cmp_u64( - |i, j| vcleq_u64(i, j), - |a: u64, b: u64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vleq_f64() { - test_cmp_f64( - |i, j| vcle_f64(i, j), - |a: f64, b: f64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcleq_f64() { - testq_cmp_f64( - |i, j| vcleq_f64(i, j), - |a: f64, b: f64| -> u64 { if a <= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcge_s64() { - test_cmp_s64( - |i, j| vcge_s64(i, j), - |a: i64, b: i64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcgeq_s64() { - testq_cmp_s64( - |i, j| vcgeq_s64(i, j), - |a: i64, b: i64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcge_u64() { - test_cmp_u64( - |i, j| vcge_u64(i, j), - |a: u64, b: u64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcgeq_u64() { - testq_cmp_u64( - |i, j| vcgeq_u64(i, j), - |a: u64, b: u64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vgeq_f64() { - test_cmp_f64( - |i, j| vcge_f64(i, j), - |a: f64, b: f64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcgeq_f64() { - testq_cmp_f64( - |i, j| vcgeq_f64(i, j), - |a: f64, b: f64| -> u64 { if a >= b { 0xFFFFFFFFFFFFFFFF } else { 0 } }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vmul_f64() { - test_ari_f64(|i, j| vmul_f64(i, j), |a: f64, b: f64| -> f64 { a * b }); - } - #[simd_test(enable = "neon")] - unsafe fn test_vmulq_f64() { - testq_ari_f64(|i, j| vmulq_f64(i, j), |a: f64, b: f64| -> f64 { a * b }); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vsub_f64() { - test_ari_f64(|i, j| vsub_f64(i, j), |a: f64, b: f64| -> f64 { a - b }); - } - #[simd_test(enable = "neon")] - unsafe fn test_vsubq_f64() { - testq_ari_f64(|i, j| vsubq_f64(i, j), |a: f64, b: f64| -> f64 { a - b }); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vabsd_s64() { - assert_eq!(vabsd_s64(-1), 1); - assert_eq!(vabsd_s64(0), 0); - assert_eq!(vabsd_s64(1), 1); - assert_eq!(vabsd_s64(i64::MIN), i64::MIN); - assert_eq!(vabsd_s64(i64::MIN + 1), i64::MAX); - } - #[simd_test(enable = "neon")] - unsafe fn test_vabs_s64() { - let a = i64x1::new(i64::MIN); - let r: i64x1 = transmute(vabs_s64(transmute(a))); - let e = i64x1::new(i64::MIN); - assert_eq!(r, e); - let a = i64x1::new(i64::MIN + 1); - let r: i64x1 = transmute(vabs_s64(transmute(a))); - let e = i64x1::new(i64::MAX); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vabsq_s64() { - let a = i64x2::new(i64::MIN, i64::MIN + 1); - let r: i64x2 = transmute(vabsq_s64(transmute(a))); - let e = i64x2::new(i64::MIN, i64::MAX); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] unsafe fn test_vbsl_f64() { let a = u64x1::new(0x8000000000000000); @@ -4766,134 +921,6 @@ mod tests { assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vaddv_s16() { - let a = i16x4::new(1, 2, 3, -4); - let r: i16 = vaddv_s16(transmute(a)); - let e = 2_i16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddv_u16() { - let a = u16x4::new(1, 2, 3, 4); - let r: u16 = vaddv_u16(transmute(a)); - let e = 10_u16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddv_s32() { - let a = i32x2::new(1, -2); - let r: i32 = vaddv_s32(transmute(a)); - let e = -1_i32; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddv_u32() { - let a = u32x2::new(1, 2); - let r: u32 = vaddv_u32(transmute(a)); - let e = 3_u32; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddv_s8() { - let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, -8); - let r: i8 = vaddv_s8(transmute(a)); - let e = 20_i8; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddv_u8() { - let a = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r: u8 = vaddv_u8(transmute(a)); - let e = 36_u8; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_s16() { - let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, -8); - let r: i16 = vaddvq_s16(transmute(a)); - let e = 20_i16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_u16() { - let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r: u16 = vaddvq_u16(transmute(a)); - let e = 36_u16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_s32() { - let a = i32x4::new(1, 2, 3, -4); - let r: i32 = vaddvq_s32(transmute(a)); - let e = 2_i32; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_u32() { - let a = u32x4::new(1, 2, 3, 4); - let r: u32 = vaddvq_u32(transmute(a)); - let e = 10_u32; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_s8() { - let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16); - let r: i8 = vaddvq_s8(transmute(a)); - let e = 104_i8; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_u8() { - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r: u8 = vaddvq_u8(transmute(a)); - let e = 136_u8; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_s64() { - let a = i64x2::new(1, -2); - let r: i64 = vaddvq_s64(transmute(a)); - let e = -1_i64; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddvq_u64() { - let a = u64x2::new(1, 2); - let r: u64 = vaddvq_u64(transmute(a)); - let e = 3_u64; - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddlv_s8() { - let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, -8); - let r: i16 = vaddlv_s8(transmute(a)); - let e = 20_i16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddlv_u8() { - let a = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r: u16 = vaddlv_u8(transmute(a)); - let e = 36_u16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddlvq_s8() { - let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16); - let r: i16 = vaddlvq_s8(transmute(a)); - let e = 104_i16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddlvq_u8() { - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r: u16 = vaddlvq_u8(transmute(a)); - let e = 136_u16; - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] unsafe fn test_vld1_f64() { let a: [f64; 2] = [0., 1.]; @@ -4966,55 +993,6 @@ mod tests { assert_eq!(vals[1], 1.); assert_eq!(vals[2], 2.); } - - #[simd_test(enable = "neon,sm4")] - unsafe fn test_vsm3tt1aq_u32() { - let a: u32x4 = u32x4::new(1, 2, 3, 4); - let b: u32x4 = u32x4::new(1, 2, 3, 4); - let c: u32x4 = u32x4::new(1, 2, 3, 4); - let e: u32x4 = u32x4::new(2, 1536, 4, 16395); - let r: u32x4 = transmute(vsm3tt1aq_u32::<0>(transmute(a), transmute(b), transmute(c))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon,sm4")] - unsafe fn test_vsm3tt1bq_u32() { - let a: u32x4 = u32x4::new(1, 2, 3, 4); - let b: u32x4 = u32x4::new(1, 2, 3, 4); - let c: u32x4 = u32x4::new(1, 2, 3, 4); - let e: u32x4 = u32x4::new(2, 1536, 4, 16392); - let r: u32x4 = transmute(vsm3tt1bq_u32::<0>(transmute(a), transmute(b), transmute(c))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon,sm4")] - unsafe fn test_vsm3tt2aq_u32() { - let a: u32x4 = u32x4::new(1, 2, 3, 4); - let b: u32x4 = u32x4::new(1, 2, 3, 4); - let c: u32x4 = u32x4::new(1, 2, 3, 4); - let e: u32x4 = u32x4::new(2, 1572864, 4, 1447435); - let r: u32x4 = transmute(vsm3tt2aq_u32::<0>(transmute(a), transmute(b), transmute(c))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon,sm4")] - unsafe fn test_vsm3tt2bq_u32() { - let a: u32x4 = u32x4::new(1, 2, 3, 4); - let b: u32x4 = u32x4::new(1, 2, 3, 4); - let c: u32x4 = u32x4::new(1, 2, 3, 4); - let e: u32x4 = u32x4::new(2, 1572864, 4, 1052680); - let r: u32x4 = transmute(vsm3tt2bq_u32::<0>(transmute(a), transmute(b), transmute(c))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon,sha3")] - unsafe fn test_vxarq_u64() { - let a: u64x2 = u64x2::new(1, 2); - let b: u64x2 = u64x2::new(3, 4); - let e: u64x2 = u64x2::new(2, 6); - let r: u64x2 = transmute(vxarq_u64::<0>(transmute(a), transmute(b))); - assert_eq!(r, e); - } } #[cfg(test)] diff --git a/crates/core_arch/src/arm/mod.rs b/crates/core_arch/src/arm/mod.rs index c69661b389..11d6e2df3a 100644 --- a/crates/core_arch/src/arm/mod.rs +++ b/crates/core_arch/src/arm/mod.rs @@ -64,12 +64,3 @@ pub use crate::core_arch::arm_shared::*; #[cfg(test)] use stdarch_test::assert_instr; - -// NEON intrinsics are currently broken on big-endian, so don't expose them. (#1484) -#[cfg(target_endian = "little")] -#[cfg(any(target_feature = "v7", doc))] -pub(crate) mod neon; -#[cfg(target_endian = "little")] -#[cfg(any(target_feature = "v7", doc))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub use neon::*; diff --git a/crates/core_arch/src/arm/neon.rs b/crates/core_arch/src/arm/neon.rs index ffeb2c6fe1..3badab5a4c 100644 --- a/crates/core_arch/src/arm/neon.rs +++ b/crates/core_arch/src/arm/neon.rs @@ -1,8 +1,4 @@ use crate::core_arch::arm_shared::neon::*; -use crate::mem::{align_of, transmute}; - -#[cfg(test)] -use stdarch_test::assert_instr; #[allow(improper_ctypes)] unsafe extern "unadjusted" { @@ -10,1441 +6,4 @@ unsafe extern "unadjusted" { fn vbsl_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; #[link_name = "llvm.arm.neon.vbsl.v16i8"] fn vbslq_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - #[link_name = "llvm.arm.neon.vpadals.v4i16.v8i8"] - pub(crate) fn vpadal_s8_(a: int16x4_t, b: int8x8_t) -> int16x4_t; - #[link_name = "llvm.arm.neon.vpadals.v2i32.v4i16"] - pub(crate) fn vpadal_s16_(a: int32x2_t, b: int16x4_t) -> int32x2_t; - #[link_name = "llvm.arm.neon.vpadals.v1i64.v2i32"] - pub(crate) fn vpadal_s32_(a: int64x1_t, b: int32x2_t) -> int64x1_t; - #[link_name = "llvm.arm.neon.vpadals.v8i16.v16i8"] - pub(crate) fn vpadalq_s8_(a: int16x8_t, b: int8x16_t) -> int16x8_t; - #[link_name = "llvm.arm.neon.vpadals.v4i32.v8i16"] - pub(crate) fn vpadalq_s16_(a: int32x4_t, b: int16x8_t) -> int32x4_t; - #[link_name = "llvm.arm.neon.vpadals.v2i64.v4i32"] - pub(crate) fn vpadalq_s32_(a: int64x2_t, b: int32x4_t) -> int64x2_t; - - #[link_name = "llvm.arm.neon.vpadalu.v4i16.v8i8"] - pub(crate) fn vpadal_u8_(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t; - #[link_name = "llvm.arm.neon.vpadalu.v2i32.v4i16"] - pub(crate) fn vpadal_u16_(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t; - #[link_name = "llvm.arm.neon.vpadalu.v1i64.v2i32"] - pub(crate) fn vpadal_u32_(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t; - #[link_name = "llvm.arm.neon.vpadalu.v8i16.v16i8"] - pub(crate) fn vpadalq_u8_(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t; - #[link_name = "llvm.arm.neon.vpadalu.v4i32.v8i16"] - pub(crate) fn vpadalq_u16_(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t; - #[link_name = "llvm.arm.neon.vpadalu.v2i64.v4i32"] - pub(crate) fn vpadalq_u32_(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t; - - #[link_name = "llvm.arm.neon.vtbl1"] - fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; - #[link_name = "llvm.arm.neon.vtbl2"] - fn vtbl2(a: int8x8_t, b: int8x8_t, b: int8x8_t) -> int8x8_t; - #[link_name = "llvm.arm.neon.vtbl3"] - fn vtbl3(a: int8x8_t, b: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - #[link_name = "llvm.arm.neon.vtbl4"] - fn vtbl4(a: int8x8_t, b: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; - - #[link_name = "llvm.arm.neon.vtbx1"] - fn vtbx1(a: int8x8_t, b: int8x8_t, b: int8x8_t) -> int8x8_t; - #[link_name = "llvm.arm.neon.vtbx2"] - fn vtbx2(a: int8x8_t, b: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - #[link_name = "llvm.arm.neon.vtbx3"] - fn vtbx3(a: int8x8_t, b: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; - #[link_name = "llvm.arm.neon.vtbx4"] - fn vtbx4( - a: int8x8_t, - b: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - e: int8x8_t, - ) -> int8x8_t; - - #[link_name = "llvm.arm.neon.vshiftins.v8i8"] - fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, shift: int8x8_t) -> int8x8_t; - #[link_name = "llvm.arm.neon.vshiftins.v16i8"] - fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, shift: int8x16_t) -> int8x16_t; - #[link_name = "llvm.arm.neon.vshiftins.v4i16"] - fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, shift: int16x4_t) -> int16x4_t; - #[link_name = "llvm.arm.neon.vshiftins.v8i16"] - fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, shift: int16x8_t) -> int16x8_t; - #[link_name = "llvm.arm.neon.vshiftins.v2i32"] - fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, shift: int32x2_t) -> int32x2_t; - #[link_name = "llvm.arm.neon.vshiftins.v4i32"] - fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, shift: int32x4_t) -> int32x4_t; - #[link_name = "llvm.arm.neon.vshiftins.v1i64"] - fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, shift: int64x1_t) -> int64x1_t; - #[link_name = "llvm.arm.neon.vshiftins.v2i64"] - fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, shift: int64x2_t) -> int64x2_t; - - #[link_name = "llvm.arm.neon.vld1.v8i8.p0i8"] - fn vld1_v8i8(addr: *const i8, align: i32) -> int8x8_t; - #[link_name = "llvm.arm.neon.vld1.v16i8.p0i8"] - fn vld1q_v16i8(addr: *const i8, align: i32) -> int8x16_t; - #[link_name = "llvm.arm.neon.vld1.v4i16.p0i8"] - fn vld1_v4i16(addr: *const i8, align: i32) -> int16x4_t; - #[link_name = "llvm.arm.neon.vld1.v8i16.p0i8"] - fn vld1q_v8i16(addr: *const i8, align: i32) -> int16x8_t; - #[link_name = "llvm.arm.neon.vld1.v2i32.p0i8"] - fn vld1_v2i32(addr: *const i8, align: i32) -> int32x2_t; - #[link_name = "llvm.arm.neon.vld1.v4i32.p0i8"] - fn vld1q_v4i32(addr: *const i8, align: i32) -> int32x4_t; - #[link_name = "llvm.arm.neon.vld1.v1i64.p0i8"] - fn vld1_v1i64(addr: *const i8, align: i32) -> int64x1_t; - #[link_name = "llvm.arm.neon.vld1.v2i64.p0i8"] - fn vld1q_v2i64(addr: *const i8, align: i32) -> int64x2_t; - #[link_name = "llvm.arm.neon.vld1.v2f32.p0i8"] - fn vld1_v2f32(addr: *const i8, align: i32) -> float32x2_t; - #[link_name = "llvm.arm.neon.vld1.v4f32.p0i8"] - fn vld1q_v4f32(addr: *const i8, align: i32) -> float32x4_t; - - #[link_name = "llvm.arm.neon.vst1.p0i8.v8i8"] - fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v16i8"] - fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v4i16"] - fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v8i16"] - fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v2i32"] - fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v4i32"] - fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v1i64"] - fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v2i64"] - fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v2f32"] - fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); - #[link_name = "llvm.arm.neon.vst1.p0i8.v4f32"] - fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { - vld1_v8i8(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { - vld1q_v16i8(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { - vld1_v4i16(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { - vld1q_v8i16(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vldr))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { - vld1_v2i32(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { - vld1q_v4i32(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vldr))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { - vld1_v1i64(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { - vld1q_v2i64(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { - transmute(vld1_v8i8(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { - transmute(vld1q_v16i8(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { - transmute(vld1_v4i16(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { - transmute(vld1q_v8i16(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vldr))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { - transmute(vld1_v2i32(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { - transmute(vld1q_v4i32(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vldr))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { - transmute(vld1_v1i64(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { - transmute(vld1q_v2i64(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { - transmute(vld1_v8i8(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { - transmute(vld1q_v16i8(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { - transmute(vld1_v4i16(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { - transmute(vld1q_v8i16(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(vldr))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { - transmute(vld1_v1i64(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64) -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr("vld1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { - transmute(vld1q_v2i64(ptr as *const i8, align_of::() as i32)) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vldr))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - vld1_v2f32(ptr as *const i8, align_of::() as i32) -} - -/// Load multiple single-element structures to one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vld1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { - vld1q_v4f32(ptr as *const i8, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { - vst1_v8i8(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { - vst1q_v16i8(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { - vst1_v4i16(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { - vst1q_v8i16(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { - vst1_v2i32(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { - vst1q_v4i32(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { - vst1_v1i64(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { - vst1q_v2i64(ptr as *const i8, a, align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { - vst1_v8i8(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { - vst1q_v16i8(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { - vst1_v4i16(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { - vst1q_v8i16(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { - vst1_v2i32(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { - vst1q_v4i32(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { - vst1_v1i64(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { - vst1q_v2i64(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { - vst1_v8i8(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.8"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { - vst1q_v16i8(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { - vst1_v4i16(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.16"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { - vst1q_v8i16(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64) -#[inline] -#[target_feature(enable = "neon,aes,v8")] -#[cfg_attr(test, assert_instr("vst1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { - vst1_v1i64(ptr as *const i8, transmute(a), align_of::() as i32) -} - -/// Store multiple single-element structures from one, two, three, or four registers. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64) -#[inline] -#[target_feature(enable = "neon,aes,v8")] -#[cfg_attr(test, assert_instr("vst1.64"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { - vst1q_v2i64(ptr as *const i8, transmute(a), align_of::() as i32) -} - -// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { - vst1_v2f32(ptr as *const i8, a, align_of::() as i32) -} - -// Store multiple single-element structures from one, two, three, or four registers. -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vst1.32"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { - vst1q_v4f32(ptr as *const i8, a, align_of::() as i32) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vtbl1(a, b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl1(transmute(a), transmute(b))) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl1(transmute(a), transmute(b))) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - vtbl2(a.0, a.1, b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - vtbl3(a.0, a.1, a.2, b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { - vtbl4(a.0, a.1, a.2, a.3, b) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )) -} - -/// Table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbl))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - vtbx1(a, b, c) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx1(transmute(a), transmute(b), transmute(c))) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx1(transmute(a), transmute(b), transmute(c))) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { - vtbx2(a, b.0, b.1, c) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { - vtbx3(a, b.0, b.1, b.2, c) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - vtbx4(a, b.0, b.1, b.2, b.3, c) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )) -} - -/// Extended table look-up -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vtbx))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )) -} - -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - let n = N as i8; - vshiftins_v8i8(a, b, int8x8_t::splat(n)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - let n = N as i8; - vshiftins_v16i8(a, b, int8x16_t::splat(n)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - let n = N as i16; - vshiftins_v4i16(a, b, int16x4_t::splat(n)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - let n = N as i16; - vshiftins_v8i16(a, b, int16x8_t::splat(n)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); - vshiftins_v2i32(a, b, int32x2_t::splat(N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); - vshiftins_v4i32(a, b, int32x4_t::splat(N)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(0 <= N && N <= 63); - vshiftins_v1i64(a, b, int64x1_t::splat(N as i64)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(0 <= N && N <= 63); - vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - let n = N as i8; - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(n), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - let n = N as i8; - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(n), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - let n = N as i16; - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(n), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - let n = N as i16; - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(n), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 0 && N <= 31); - transmute(vshiftins_v2i32( - transmute(a), - transmute(b), - int32x2_t::splat(N), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 31); - transmute(vshiftins_v4i32( - transmute(a), - transmute(b), - int32x4_t::splat(N), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(0 <= N && N <= 63); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(N as i64), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(0 <= N && N <= 63); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(N as i64), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - let n = N as i8; - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(n), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - let n = N as i8; - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(n), - )) -} -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 4); - let n = N as i16; - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(n), - )) -} - -/// Shift Left and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 4); - let n = N as i16; - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(n), - )) -} - -/// Shift Left and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64) -#[inline] -#[target_feature(enable = "neon,v7,aes")] -#[cfg_attr(test, assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(0 <= N && N <= 63); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(N as i64), - )) -} - -/// Shift Left and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64) -#[inline] -#[target_feature(enable = "neon,v7,aes")] -#[cfg_attr(test, assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(0 <= N && N <= 63); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(N as i64), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(1 <= N && N <= 8); - let n = -N as i8; - vshiftins_v8i8(a, b, int8x8_t::splat(n)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(1 <= N && N <= 8); - let n = -N as i8; - vshiftins_v16i8(a, b, int8x16_t::splat(n)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(1 <= N && N <= 16); - let n = -N as i16; - vshiftins_v4i16(a, b, int16x4_t::splat(n)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(1 <= N && N <= 16); - let n = -N as i16; - vshiftins_v8i16(a, b, int16x8_t::splat(n)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(1 <= N && N <= 32); - vshiftins_v2i32(a, b, int32x2_t::splat(-N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(1 <= N && N <= 32); - vshiftins_v4i32(a, b, int32x4_t::splat(-N)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(1 <= N && N <= 64); - vshiftins_v1i64(a, b, int64x1_t::splat(-N as i64)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(1 <= N && N <= 64); - vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(1 <= N && N <= 8); - let n = -N as i8; - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(n), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(1 <= N && N <= 8); - let n = -N as i8; - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(n), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(1 <= N && N <= 16); - let n = -N as i16; - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(n), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(1 <= N && N <= 16); - let n = -N as i16; - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(n), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(1 <= N && N <= 32); - transmute(vshiftins_v2i32( - transmute(a), - transmute(b), - int32x2_t::splat(-N), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(1 <= N && N <= 32); - transmute(vshiftins_v4i32( - transmute(a), - transmute(b), - int32x4_t::splat(-N), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(-N as i64), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(-N as i64), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(1 <= N && N <= 8); - let n = -N as i8; - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(n), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(1 <= N && N <= 8); - let n = -N as i8; - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(n), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(1 <= N && N <= 16); - let n = -N as i16; - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(n), - )) -} -/// Shift Right and Insert (immediate) -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(1 <= N && N <= 16); - let n = -N as i16; - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(n), - )) -} - -/// Shift Right and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64) -#[inline] -#[target_feature(enable = "neon,v7,aes")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(-N as i64), - )) -} - -/// Shift Right and Insert (immediate) -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64) -#[inline] -#[target_feature(enable = "neon,v7,aes")] -#[cfg_attr(test, assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(-N as i64), - )) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::core_arch::{arm::*, simd::*}; - use crate::mem::transmute; - use stdarch_test::simd_test; - - #[simd_test(enable = "neon")] - unsafe fn test_vcvtq_s32_f32() { - let f = f32x4::new(-1., 2., 3., 4.); - let e = i32x4::new(-1, 2, 3, 4); - let r: i32x4 = transmute(vcvtq_s32_f32(transmute(f))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vcvtq_u32_f32() { - let f = f32x4::new(1., 2., 3., 4.); - let e = u32x4::new(1, 2, 3, 4); - let r: u32x4 = transmute(vcvtq_u32_f32(transmute(f))); - assert_eq!(r, e); - } } diff --git a/crates/core_arch/src/arm_shared/crc.rs b/crates/core_arch/src/arm_shared/crc.rs deleted file mode 100644 index 1c10af05f6..0000000000 --- a/crates/core_arch/src/arm_shared/crc.rs +++ /dev/null @@ -1,279 +0,0 @@ -unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32b" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32b")] - fn crc32b_(crc: u32, data: u32) -> u32; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32h")] - fn crc32h_(crc: u32, data: u32) -> u32; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32w" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] - fn crc32w_(crc: u32, data: u32) -> u32; - - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32cb" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cb")] - fn crc32cb_(crc: u32, data: u32) -> u32; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32ch" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32ch")] - fn crc32ch_(crc: u32, data: u32) -> u32; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32cw" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] - fn crc32cw_(crc: u32, data: u32) -> u32; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32x" - )] - fn crc32x_(crc: u32, data: u64) -> u32; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crc32cx" - )] - fn crc32cx_(crc: u32, data: u64) -> u32; -} - -#[cfg(test)] -use stdarch_test::assert_instr; - -/// CRC32 single round checksum for bytes (8 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32b) -#[inline] -#[target_feature(enable = "crc")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(crc32b))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_aarch32_crc32", issue = "125085") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") -)] -pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { - crc32b_(crc, data as u32) -} - -/// CRC32 single round checksum for half words (16 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32h) -#[inline] -#[target_feature(enable = "crc")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(crc32h))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_aarch32_crc32", issue = "125085") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") -)] -pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { - crc32h_(crc, data as u32) -} - -/// CRC32 single round checksum for words (32 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32w) -#[inline] -#[target_feature(enable = "crc")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(crc32w))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_aarch32_crc32", issue = "125085") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") -)] -pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { - crc32w_(crc, data) -} - -/// CRC32-C single round checksum for bytes (8 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cb) -#[inline] -#[target_feature(enable = "crc")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(crc32cb))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_aarch32_crc32", issue = "125085") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") -)] -pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { - crc32cb_(crc, data as u32) -} - -/// CRC32-C single round checksum for half words (16 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32ch) -#[inline] -#[target_feature(enable = "crc")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(crc32ch))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_aarch32_crc32", issue = "125085") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") -)] -pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { - crc32ch_(crc, data as u32) -} - -/// CRC32-C single round checksum for words (32 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cw) -#[inline] -#[target_feature(enable = "crc")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(crc32cw))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_aarch32_crc32", issue = "125085") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") -)] -pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { - crc32cw_(crc, data) -} - -/// CRC32 single round checksum for quad words (64 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d) -#[inline] -#[target_feature(enable = "crc")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(crc32x))] -#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] -pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { - crc32x_(crc, data) -} - -/// CRC32 single round checksum for quad words (64 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d) -#[inline] -#[target_feature(enable = "crc")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(crc32w))] -#[unstable(feature = "stdarch_aarch32_crc32", issue = "125085")] -pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { - // On 32-bit ARM this intrinsic emits a chain of two `crc32_w` instructions - // and truncates the data to 32 bits in both clang and gcc - crc32w_( - crc32w_(crc, (data & 0xffffffff) as u32), - (data >> 32) as u32, - ) -} - -/// CRC32 single round checksum for quad words (64 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd) -#[inline] -#[target_feature(enable = "crc")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(crc32cx))] -#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] -pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { - crc32cx_(crc, data) -} - -/// CRC32 single round checksum for quad words (64 bits). -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd) -#[inline] -#[target_feature(enable = "crc")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(crc32cw))] -#[unstable(feature = "stdarch_aarch32_crc32", issue = "125085")] -pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { - // On 32-bit ARM this intrinsic emits a chain of two `crc32_cw` instructions - // and truncates the data to 32 bits in both clang and gcc - crc32cw_( - crc32cw_(crc, (data & 0xffffffff) as u32), - (data >> 32) as u32, - ) -} - -#[cfg(test)] -mod tests { - use crate::core_arch::{arm_shared::*, simd::*}; - use std::mem; - use stdarch_test::simd_test; - - #[simd_test(enable = "crc")] - unsafe fn test_crc32d() { - assert_eq!(__crc32d(0, 0), 0); - assert_eq!(__crc32d(0, 18446744073709551615), 1147535477); - } - - #[simd_test(enable = "crc")] - unsafe fn test_crc32cd() { - assert_eq!(__crc32cd(0, 0), 0); - assert_eq!(__crc32cd(0, 18446744073709551615), 3293575501); - } - - #[simd_test(enable = "crc")] - unsafe fn test_crc32b() { - assert_eq!(__crc32b(0, 0), 0); - assert_eq!(__crc32b(0, 255), 755167117); - } - - #[simd_test(enable = "crc")] - unsafe fn test_crc32h() { - assert_eq!(__crc32h(0, 0), 0); - assert_eq!(__crc32h(0, 16384), 1994146192); - } - - #[simd_test(enable = "crc")] - unsafe fn test_crc32w() { - assert_eq!(__crc32w(0, 0), 0); - assert_eq!(__crc32w(0, 4294967295), 3736805603); - } - - #[simd_test(enable = "crc")] - unsafe fn test_crc32cb() { - assert_eq!(__crc32cb(0, 0), 0); - assert_eq!(__crc32cb(0, 255), 2910671697); - } - - #[simd_test(enable = "crc")] - unsafe fn test_crc32ch() { - assert_eq!(__crc32ch(0, 0), 0); - assert_eq!(__crc32ch(0, 16384), 1098587580); - } - - #[simd_test(enable = "crc")] - unsafe fn test_crc32cw() { - assert_eq!(__crc32cw(0, 0), 0); - assert_eq!(__crc32cw(0, 4294967295), 3080238136); - } -} diff --git a/crates/core_arch/src/arm_shared/crypto.rs b/crates/core_arch/src/arm_shared/crypto.rs deleted file mode 100644 index 07c96008d9..0000000000 --- a/crates/core_arch/src/arm_shared/crypto.rs +++ /dev/null @@ -1,544 +0,0 @@ -use crate::core_arch::arm_shared::{uint8x16_t, uint32x4_t}; - -#[allow(improper_ctypes)] -unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aese" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] - fn vaeseq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesd" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] - fn vaesdq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesmc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] - fn vaesmcq_u8_(data: uint8x16_t) -> uint8x16_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesimc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] - fn vaesimcq_u8_(data: uint8x16_t) -> uint8x16_t; - - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")] - fn vsha1h_u32_(hash_e: u32) -> u32; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] - fn vsha1su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su1" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] - fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1c" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] - fn vsha1cq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1p" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] - fn vsha1pq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1m" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] - fn vsha1mq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; - - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] - fn vsha256hq_u32_(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h2" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] - fn vsha256h2q_u32_(hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t) -> uint32x4_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] - fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t; - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su1" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] - fn vsha256su1q_u32_(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t; -} - -#[cfg(test)] -use stdarch_test::assert_instr; - -/// AES single round encryption. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8) -#[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aese))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - vaeseq_u8_(data, key) -} - -/// AES single round decryption. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8) -#[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesd))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - vaesdq_u8_(data, key) -} - -/// AES mix columns. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8) -#[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesmc))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { - vaesmcq_u8_(data) -} - -/// AES inverse mix columns. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8) -#[inline] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesimc))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { - vaesimcq_u8_(data) -} - -/// SHA1 fixed rotate. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1h))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { - vsha1h_u32_(hash_e) -} - -/// SHA1 hash update accelerator, choose. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1c))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - vsha1cq_u32_(hash_abcd, hash_e, wk) -} - -/// SHA1 hash update accelerator, majority. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1m))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - vsha1mq_u32_(hash_abcd, hash_e, wk) -} - -/// SHA1 hash update accelerator, parity. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1p))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - vsha1pq_u32_(hash_abcd, hash_e, wk) -} - -/// SHA1 schedule update accelerator, first part. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su0))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { - vsha1su0q_u32_(w0_3, w4_7, w8_11) -} - -/// SHA1 schedule update accelerator, second part. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su1))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { - vsha1su1q_u32_(tw0_3, w12_15) -} - -/// SHA256 hash update accelerator. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha256hq_u32( - hash_abcd: uint32x4_t, - hash_efgh: uint32x4_t, - wk: uint32x4_t, -) -> uint32x4_t { - vsha256hq_u32_(hash_abcd, hash_efgh, wk) -} - -/// SHA256 hash update accelerator, upper part. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h2))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha256h2q_u32( - hash_efgh: uint32x4_t, - hash_abcd: uint32x4_t, - wk: uint32x4_t, -) -> uint32x4_t { - vsha256h2q_u32_(hash_efgh, hash_abcd, wk) -} - -/// SHA256 schedule update accelerator, first part. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su0))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { - vsha256su0q_u32_(w0_3, w4_7) -} - -/// SHA256 schedule update accelerator, second part. -/// -/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32) -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su1))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha256su1q_u32( - tw0_3: uint32x4_t, - w8_11: uint32x4_t, - w12_15: uint32x4_t, -) -> uint32x4_t { - vsha256su1q_u32_(tw0_3, w8_11, w12_15) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::core_arch::{arm_shared::*, simd::*}; - use std::mem; - use stdarch_test::simd_test; - - #[simd_test(enable = "aes")] - unsafe fn test_vaeseq_u8() { - let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)); - let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7)); - let r: u8x16 = mem::transmute(vaeseq_u8(data, key)); - assert_eq!( - r, - u8x16::new( - 124, 123, 124, 118, 124, 123, 124, 197, 124, 123, 124, 118, 124, 123, 124, 197 - ) - ); - } - - #[simd_test(enable = "aes")] - unsafe fn test_vaesdq_u8() { - let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)); - let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7)); - let r: u8x16 = mem::transmute(vaesdq_u8(data, key)); - assert_eq!( - r, - u8x16::new(9, 213, 9, 251, 9, 213, 9, 56, 9, 213, 9, 251, 9, 213, 9, 56) - ); - } - - #[simd_test(enable = "aes")] - unsafe fn test_vaesmcq_u8() { - let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)); - let r: u8x16 = mem::transmute(vaesmcq_u8(data)); - assert_eq!( - r, - u8x16::new(3, 4, 9, 10, 15, 8, 21, 30, 3, 4, 9, 10, 15, 8, 21, 30) - ); - } - - #[simd_test(enable = "aes")] - unsafe fn test_vaesimcq_u8() { - let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)); - let r: u8x16 = mem::transmute(vaesimcq_u8(data)); - assert_eq!( - r, - u8x16::new( - 43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80, 125, 70 - ) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha1h_u32() { - assert_eq!(vsha1h_u32(0x1234), 0x048d); - assert_eq!(vsha1h_u32(0x5678), 0x159e); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha1su0q_u32() { - let r: u32x4 = mem::transmute(vsha1su0q_u32( - mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)), - mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)), - mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)), - )); - assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678)); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha1su1q_u32() { - let r: u32x4 = mem::transmute(vsha1su1q_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0x00008898, 0x00019988, 0x00008898, 0x0000acd0) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha1cq_u32() { - let r: u32x4 = mem::transmute(vsha1cq_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - 0x1234, - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0x8a32cbd8, 0x0c518a96, 0x0018a081, 0x0000c168) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha1pq_u32() { - let r: u32x4 = mem::transmute(vsha1pq_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - 0x1234, - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0x469f0ba3, 0x0a326147, 0x80145d7f, 0x00009f47) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha1mq_u32() { - let r: u32x4 = mem::transmute(vsha1mq_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - 0x1234, - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0xaa39693b, 0x0d51bf84, 0x001aa109, 0x0000d278) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha256hq_u32() { - let r: u32x4 = mem::transmute(vsha256hq_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0x05e9aaa8, 0xec5f4c02, 0x20a1ea61, 0x28738cef) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha256h2q_u32() { - let r: u32x4 = mem::transmute(vsha256h2q_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0x3745362e, 0x2fb51d00, 0xbd4c529b, 0x968b8516) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha256su0q_u32() { - let r: u32x4 = mem::transmute(vsha256su0q_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0xe59e1c97, 0x5eaf68da, 0xd7bcb51f, 0x6c8de152) - ); - } - - #[simd_test(enable = "sha2")] - unsafe fn test_vsha256su1q_u32() { - let r: u32x4 = mem::transmute(vsha256su1q_u32( - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), - )); - assert_eq!( - r, - u32x4::new(0x5e09e8d2, 0x74a6f16b, 0xc966606b, 0xa686ee9f) - ); - } -} diff --git a/crates/core_arch/src/arm_shared/mod.rs b/crates/core_arch/src/arm_shared/mod.rs index 9dee6aed3b..dcfa500085 100644 --- a/crates/core_arch/src/arm_shared/mod.rs +++ b/crates/core_arch/src/arm_shared/mod.rs @@ -60,46 +60,6 @@ mod hints; #[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub use self::hints::*; -mod crc; -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_aarch32_crc32", issue = "125085") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") -)] -pub use crc::*; - -// NEON intrinsics are currently broken on big-endian, so don't expose them. (#1484) -#[cfg(target_endian = "little")] -#[cfg(any( - target_arch = "aarch64", - target_arch = "arm64ec", - target_feature = "v7", - doc -))] -mod crypto; -// NEON intrinsics are currently broken on big-endian, so don't expose them. (#1484) -#[cfg(target_endian = "little")] -#[cfg(any( - target_arch = "aarch64", - target_arch = "arm64ec", - target_feature = "v7", - doc -))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub use self::crypto::*; - -// NEON intrinsics are currently broken on big-endian, so don't expose them. (#1484) -#[cfg(target_endian = "little")] #[cfg(any( target_arch = "aarch64", target_arch = "arm64ec", @@ -107,7 +67,7 @@ pub use self::crypto::*; doc ))] pub(crate) mod neon; -#[cfg(target_endian = "little")] + #[cfg(any( target_arch = "aarch64", target_arch = "arm64ec", diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index b7854051ae..4b49d77889 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -1144,296 +1144,6 @@ impl_sign_conversions_neon! { (uint8x8x4_t, int8x8x4_t) } -#[allow(improper_ctypes)] -unsafe extern "unadjusted" { - // absolute value (64-bit) - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i8" - )] - fn vabs_s8_(a: int8x8_t) -> int8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i16" - )] - fn vabs_s16_(a: int16x4_t) -> int16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v2i32" - )] - fn vabs_s32_(a: int32x2_t) -> int32x2_t; - // absolute value (128-bit) - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v16i8" - )] - fn vabsq_s8_(a: int8x16_t) -> int8x16_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i16" - )] - fn vabsq_s16_(a: int16x8_t) -> int16x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i32" - )] - fn vabsq_s32_(a: int32x4_t) -> int32x4_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v8i8" - )] - fn vpmins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v4i16" - )] - fn vpmins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v2i32" - )] - fn vpmins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v8i8" - )] - fn vpminu_v8i8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v4i16" - )] - fn vpminu_v4i16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v2i32" - )] - fn vpminu_v2i32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminp.v2f32" - )] - fn vpminf_v2f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v8i8" - )] - fn vpmaxs_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v4i16" - )] - fn vpmaxs_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v2i32" - )] - fn vpmaxs_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v8i8" - )] - fn vpmaxu_v8i8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v4i16" - )] - fn vpmaxu_v4i16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v2i32" - )] - fn vpmaxu_v2i32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v2f32" - )] - fn vpmaxf_v2f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v8i8" - )] - fn vraddhn_s16_(a: int16x8_t, b: int16x8_t) -> int8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v4i16" - )] - fn vraddhn_s32_(a: int32x4_t, b: int32x4_t) -> int16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v2i32" - )] - fn vraddhn_s64_(a: int64x2_t, b: int64x2_t) -> int32x2_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v4i16" - )] - fn vpadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v2i32" - )] - fn vpadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v8i8" - )] - fn vpadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" - )] - pub(crate) fn vpaddl_s8_(a: int8x8_t) -> int16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" - )] - pub(crate) fn vpaddl_s16_(a: int16x4_t) -> int32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" - )] - pub(crate) fn vpaddl_s32_(a: int32x2_t) -> int64x1_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" - )] - pub(crate) fn vpaddlq_s8_(a: int8x16_t) -> int16x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" - )] - pub(crate) fn vpaddlq_s16_(a: int16x8_t) -> int32x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" - )] - pub(crate) fn vpaddlq_s32_(a: int32x4_t) -> int64x2_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" - )] - pub(crate) fn vpaddl_u8_(a: uint8x8_t) -> uint16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" - )] - pub(crate) fn vpaddl_u16_(a: uint16x4_t) -> uint32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" - )] - pub(crate) fn vpaddl_u32_(a: uint32x2_t) -> uint64x1_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" - )] - pub(crate) fn vpaddlq_u8_(a: uint8x16_t) -> uint16x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" - )] - pub(crate) fn vpaddlq_u16_(a: uint16x8_t) -> uint32x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" - )] - pub(crate) fn vpaddlq_u32_(a: uint32x4_t) -> uint64x2_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v8i8" - )] - fn vcnt_s8_(a: int8x8_t) -> int8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v16i8" - )] - fn vcntq_s8_(a: int8x16_t) -> int8x16_t; - - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i8" - )] - fn vclz_s8_(a: int8x8_t) -> int8x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v16i8" - )] - fn vclzq_s8_(a: int8x16_t) -> int8x16_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i16" - )] - fn vclz_s16_(a: int16x4_t) -> int16x4_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i16" - )] - fn vclzq_s16_(a: int16x8_t) -> int16x8_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v2i32" - )] - fn vclz_s32_(a: int32x2_t) -> int32x2_t; - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i32" - )] - fn vclzq_s32_(a: int32x4_t) -> int32x4_t; -} - /// Load one single-element structure to one lane of one register. #[inline] #[target_feature(enable = "neon")] @@ -2771,248 +2481,6 @@ pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 simd_add(a, vabdq_u32(b, c)) } -/// Absolute value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { - vabs_s8_(a) -} -/// Absolute value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { - vabs_s16_(a) -} -/// Absolute value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { - vabs_s32_(a) -} -/// Absolute value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { - vabsq_s8_(a) -} -/// Absolute value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { - vabsq_s16_(a) -} -/// Absolute value (wrapping). -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { - vabsq_s32_(a) -} - -/// Add pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - vpadd_s16_(a, b) -} -/// Add pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - vpadd_s32_(a, b) -} -/// Add pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vpadd_s8_(a, b) -} -/// Add pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - transmute(vpadd_s16_(transmute(a), transmute(b))) -} -/// Add pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - transmute(vpadd_s32_(transmute(a), transmute(b))) -} -/// Add pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - transmute(vpadd_s8_(transmute(a), transmute(b))) -} - /// Vector add. #[inline] #[target_feature(enable = "neon")] @@ -4165,14 +3633,14 @@ pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> ui simd_shuffle!(r, x, [0, 1, 2, 3]) } -/// Rounding Add returning High Narrow. +/// Vector narrow integer. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4182,18 +3650,18 @@ pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - vraddhn_s16_(a, b) +pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t { + simd_cast(a) } -/// Rounding Add returning High Narrow. +/// Vector narrow integer. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4203,18 +3671,18 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - vraddhn_s32_(a, b) +pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t { + simd_cast(a) } -/// Rounding Add returning High Narrow. +/// Vector narrow integer. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4224,18 +3692,18 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - vraddhn_s64_(a, b) +pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t { + simd_cast(a) } -/// Rounding Add returning High Narrow. +/// Vector narrow integer. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4245,18 +3713,18 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - transmute(vraddhn_s16_(transmute(a), transmute(b))) +pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { + simd_cast(a) } -/// Rounding Add returning High Narrow. +/// Vector narrow integer. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4266,18 +3734,18 @@ pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - transmute(vraddhn_s32_(transmute(a), transmute(b))) +pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { + simd_cast(a) } -/// Rounding Add returning High Narrow. +/// Vector narrow integer. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4287,18 +3755,18 @@ pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - transmute(vraddhn_s64_(transmute(a), transmute(b))) +pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { + simd_cast(a) } -/// Rounding Add returning High Narrow (high half). +/// Vector long move. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -4308,19 +3776,18 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { - let x = vraddhn_s16_(a, b); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { + simd_cast(a) } -/// Rounding Add returning High Narrow (high half). +/// Vector long move. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -4330,19 +3797,18 @@ pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { - let x = vraddhn_s32_(a, b); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { + simd_cast(a) } -/// Rounding Add returning High Narrow (high half). +/// Vector long move. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -4352,19 +3818,18 @@ pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { - let x = vraddhn_s64_(a, b); - simd_shuffle!(r, x, [0, 1, 2, 3]) +pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { + simd_cast(a) } -/// Rounding Add returning High Narrow (high half). +/// Vector long move. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -4374,19 +3839,18 @@ pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { - let x: uint8x8_t = transmute(vraddhn_s16_(transmute(a), transmute(b))); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { + simd_cast(a) } -/// Rounding Add returning High Narrow (high half). +/// Vector long move. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -4396,19 +3860,18 @@ pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { - let x: uint16x4_t = transmute(vraddhn_s32_(transmute(a), transmute(b))); - simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { + simd_cast(a) } -/// Rounding Add returning High Narrow (high half). +/// Vector long move. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vraddhn.i64))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -4418,19 +3881,18 @@ pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { - let x: uint32x2_t = transmute(vraddhn_s64_(transmute(a), transmute(b))); - simd_shuffle!(r, x, [0, 1, 2, 3]) +pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { + simd_cast(a) } -/// Signed Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4440,18 +3902,19 @@ pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { - vpaddl_s8_(a) +pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { + let b = int8x8_t::splat(-1); + simd_xor(a, b) } -/// Signed Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4461,18 +3924,19 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { - vpaddl_s16_(a) +pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { + let b = int8x16_t::splat(-1); + simd_xor(a, b) } -/// Signed Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4482,18 +3946,19 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { - vpaddl_s32_(a) +pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { + let b = int16x4_t::splat(-1); + simd_xor(a, b) } -/// Signed Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4503,18 +3968,19 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { - vpaddlq_s8_(a) +pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { + let b = int16x8_t::splat(-1); + simd_xor(a, b) } -/// Signed Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4524,18 +3990,19 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { - vpaddlq_s16_(a) +pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { + let b = int32x2_t::splat(-1); + simd_xor(a, b) } -/// Signed Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.s32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4545,18 +4012,19 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { - vpaddlq_s32_(a) +pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { + let b = int32x4_t::splat(-1); + simd_xor(a, b) } -/// Unsigned Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4566,18 +4034,19 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { - vpaddl_u8_(a) +pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { + let b = uint8x8_t::splat(255); + simd_xor(a, b) } -/// Unsigned Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4587,18 +4056,19 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { - vpaddl_u16_(a) +pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { + let b = uint8x16_t::splat(255); + simd_xor(a, b) } -/// Unsigned Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4608,18 +4078,19 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { - vpaddl_u32_(a) +pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { + let b = uint16x4_t::splat(65_535); + simd_xor(a, b) } -/// Unsigned Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4629,18 +4100,19 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { - vpaddlq_u8_(a) +pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { + let b = uint16x8_t::splat(65_535); + simd_xor(a, b) } -/// Unsigned Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u16))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4650,18 +4122,19 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { - vpaddlq_u16_(a) +pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { + let b = uint32x2_t::splat(4_294_967_295); + simd_xor(a, b) } -/// Unsigned Add Long Pairwise. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpaddl.u32))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4671,18 +4144,19 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { - vpaddlq_u32_(a) +pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { + let b = uint32x4_t::splat(4_294_967_295); + simd_xor(a, b) } -/// Vector narrow integer. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4692,18 +4166,19 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t { - simd_cast(a) +pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { + let b = poly8x8_t::splat(255); + simd_xor(a, b) } -/// Vector narrow integer. +/// Vector bitwise not. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -4713,18 +4188,19 @@ pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t { - simd_cast(a) +pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { + let b = poly8x16_t::splat(255); + simd_xor(a, b) } -/// Vector narrow integer. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4734,18 +4210,19 @@ pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t { - simd_cast(a) +pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let c = int8x8_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector narrow integer. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4755,18 +4232,19 @@ pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { - simd_cast(a) +pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let c = int8x16_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector narrow integer. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4776,18 +4254,19 @@ pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { - simd_cast(a) +pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let c = int16x4_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector narrow integer. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4797,18 +4276,19 @@ pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { - simd_cast(a) +pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let c = int16x8_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector long move. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4818,18 +4298,19 @@ pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { - simd_cast(a) +pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let c = int32x2_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector long move. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4839,18 +4320,19 @@ pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { - simd_cast(a) +pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let c = int32x4_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector long move. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4860,18 +4342,19 @@ pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { - simd_cast(a) +pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + let c = int64x1_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector long move. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4881,18 +4364,19 @@ pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { - simd_cast(a) +pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let c = int64x2_t::splat(-1); + simd_and(simd_xor(b, c), a) } -/// Vector long move. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4902,18 +4386,19 @@ pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { - simd_cast(a) +pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let c = int8x8_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) } -/// Vector long move. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4923,18 +4408,19 @@ pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { - simd_cast(a) +pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let c = int8x16_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) } -/// Vector bitwise not. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4944,19 +4430,19 @@ pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { - let b = int8x8_t::splat(-1); - simd_xor(a, b) +pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let c = int16x4_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) } -/// Vector bitwise not. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4966,19 +4452,19 @@ pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { - let b = int8x16_t::splat(-1); - simd_xor(a, b) +pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let c = int16x8_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) } -/// Vector bitwise not. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -4988,19 +4474,19 @@ pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { - let b = int16x4_t::splat(-1); - simd_xor(a, b) +pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let c = int32x2_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) } -/// Vector bitwise not. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -5010,19 +4496,19 @@ pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { - let b = int16x8_t::splat(-1); - simd_xor(a, b) +pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let c = int32x4_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) } -/// Vector bitwise not. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -5032,19 +4518,19 @@ pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { - let b = int32x2_t::splat(-1); - simd_xor(a, b) -} +pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + let c = int64x1_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) +} -/// Vector bitwise not. +/// Vector bitwise bit clear #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bic) )] #[cfg_attr( not(target_arch = "arm"), @@ -5054,19 +4540,23 @@ pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { - let b = int32x4_t::splat(-1); - simd_xor(a, b) +pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let c = int64x2_t::splat(-1); + simd_and(simd_xor(b, transmute(c)), a) } -/// Vector bitwise not. +/// Bitwise Select instructions. This instruction sets each bit in the destination SIMD&FP register +/// to the corresponding bit from the first source SIMD&FP register when the original +/// destination bit was 1, otherwise from the second source SIMD&FP register. + +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5076,19 +4566,22 @@ pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { - let b = uint8x8_t::splat(255); - simd_xor(a, b) +pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + let not = int8x8_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise not. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5098,19 +4591,22 @@ pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { - let b = uint8x16_t::splat(255); - simd_xor(a, b) +pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + let not = int16x4_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise not. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5120,19 +4616,22 @@ pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { - let b = uint16x4_t::splat(65_535); - simd_xor(a, b) +pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + let not = int32x2_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise not. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5142,19 +4641,22 @@ pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { - let b = uint16x8_t::splat(65_535); - simd_xor(a, b) +pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { + let not = int64x1_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise not. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5164,19 +4666,19 @@ pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { - let b = uint32x2_t::splat(4_294_967_295); - simd_xor(a, b) +pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + let not = int8x8_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Vector bitwise not. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5186,19 +4688,19 @@ pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { - let b = uint32x4_t::splat(4_294_967_295); - simd_xor(a, b) +pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + let not = int16x4_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Vector bitwise not. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5208,19 +4710,19 @@ pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { - let b = poly8x8_t::splat(255); - simd_xor(a, b) +pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + let not = int32x2_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Vector bitwise not. +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5230,19 +4732,19 @@ pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { - let b = poly8x16_t::splat(255); - simd_xor(a, b) +pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { + let not = int64x1_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Vector bitwise bit clear +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5252,19 +4754,22 @@ pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let c = int8x8_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + let not = int32x2_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5274,19 +4779,22 @@ pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let c = int8x16_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { + let not = int8x8_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5296,19 +4804,22 @@ pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let c = int16x4_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { + let not = int16x4_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5318,19 +4829,22 @@ pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let c = int16x8_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + let not = int8x16_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5340,19 +4854,22 @@ pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let c = int32x2_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + let not = int16x8_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5362,19 +4879,22 @@ pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let c = int32x4_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + let not = int32x4_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5384,19 +4904,22 @@ pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - let c = int64x1_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + let not = int64x2_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5406,19 +4929,19 @@ pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let c = int64x2_t::splat(-1); - simd_and(simd_xor(b, c), a) +pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + let not = int8x16_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5428,19 +4951,19 @@ pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let c = int8x8_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) -} - -/// Vector bitwise bit clear +pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + let not = int16x8_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +} + +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5450,19 +4973,19 @@ pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let c = int8x16_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) +pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + let not = int32x4_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5472,19 +4995,19 @@ pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let c = int16x4_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) +pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + let not = int64x2_t::splat(-1); + simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5494,19 +5017,22 @@ pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let c = int16x8_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) +pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { + let not = int8x16_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5516,19 +5042,22 @@ pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let c = int32x2_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) +pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { + let not = int16x8_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Bitwise Select. (128-bit) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(bsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -5538,19 +5067,22 @@ pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let c = int32x4_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) +pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + let not = int32x4_t::splat(-1); + transmute(simd_or( + simd_and(a, transmute(b)), + simd_and(simd_xor(a, transmute(not)), transmute(c)), + )) } -/// Vector bitwise bit clear +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5560,19 +5092,19 @@ pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - let c = int64x1_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) +pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let c = int8x8_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Vector bitwise bit clear +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bic) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5582,23 +5114,19 @@ pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let c = int64x2_t::splat(-1); - simd_and(simd_xor(b, transmute(c)), a) +pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let c = int8x16_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Bitwise Select instructions. This instruction sets each bit in the destination SIMD&FP register -/// to the corresponding bit from the first source SIMD&FP register when the original -/// destination bit was 1, otherwise from the second source SIMD&FP register. - -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5608,22 +5136,19 @@ pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - let not = int8x8_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let c = int16x4_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5633,22 +5158,19 @@ pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - let not = int16x4_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let c = int16x8_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5658,22 +5180,19 @@ pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - let not = int32x2_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let c = int32x2_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5683,22 +5202,19 @@ pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { - let not = int64x1_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let c = int32x4_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5708,19 +5224,19 @@ pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let not = int8x8_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + let c = int64x1_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5730,19 +5246,19 @@ pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - let not = int16x4_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let c = int64x2_t::splat(-1); + simd_or(simd_xor(b, c), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5752,19 +5268,19 @@ pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - let not = int32x2_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let c = int8x8_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5774,19 +5290,19 @@ pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { - let not = int64x1_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let c = int8x16_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5796,22 +5312,19 @@ pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - let not = int32x2_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let c = int16x4_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5821,22 +5334,19 @@ pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { - let not = int8x8_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let c = int16x8_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5846,22 +5356,19 @@ pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { - let not = int16x4_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let c = int32x2_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. (128-bit) +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5871,22 +5378,19 @@ pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - let not = int8x16_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let c = int32x4_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. (128-bit) +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5896,22 +5400,19 @@ pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - let not = int16x8_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + let c = int64x1_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. (128-bit) +/// Vector bitwise inclusive OR NOT #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -5921,23 +5422,17 @@ pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - let not = int32x4_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let c = int64x2_t::splat(-1); + simd_or(simd_xor(b, transmute(c)), a) } -/// Bitwise Select. (128-bit) +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5946,23 +5441,17 @@ pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - let not = int64x2_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { + static_assert_uimm_bits!(IMM5, 1); + simd_extract!(v, IMM5 as u32) } -/// Bitwise Select. (128-bit) +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5971,20 +5460,17 @@ pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - let not = int8x16_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { + static_assert!(IMM5 == 0); + simd_extract!(v, 0) } -/// Bitwise Select. (128-bit) +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -5993,20 +5479,17 @@ pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - let not = int16x8_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { + static_assert_uimm_bits!(IMM5, 2); + simd_extract!(v, IMM5 as u32) } -/// Bitwise Select. (128-bit) +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6015,20 +5498,17 @@ pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - let not = int32x4_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { + static_assert_uimm_bits!(IMM5, 2); + simd_extract!(v, IMM5 as u32) } -/// Bitwise Select. (128-bit) +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6037,20 +5517,17 @@ pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - let not = int64x2_t::splat(-1); - simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) +pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { + static_assert_uimm_bits!(IMM5, 2); + simd_extract!(v, IMM5 as u32) } -/// Bitwise Select. (128-bit) +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6059,23 +5536,17 @@ pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { - let not = int8x16_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { + static_assert_uimm_bits!(IMM5, 1); + simd_extract!(v, IMM5 as u32) } -/// Bitwise Select. (128-bit) +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6084,23 +5555,17 @@ pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { - let not = int16x8_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { + static_assert_uimm_bits!(IMM5, 1); + simd_extract!(v, IMM5 as u32) } -/// Bitwise Select. (128-bit) +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(bsl) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6109,23 +5574,17 @@ pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - let not = int32x4_t::splat(-1); - transmute(simd_or( - simd_and(a, transmute(b)), - simd_and(simd_xor(a, transmute(not)), transmute(c)), - )) +pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { + static_assert_uimm_bits!(IMM5, 1); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6134,20 +5593,17 @@ pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let c = int8x8_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { + static_assert_uimm_bits!(IMM5, 2); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6156,20 +5612,17 @@ pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let c = int8x16_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { + static_assert!(IMM5 == 0); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6178,20 +5631,17 @@ pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let c = int16x4_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { + static_assert_uimm_bits!(IMM5, 1); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6200,20 +5650,17 @@ pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let c = int16x8_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { + static_assert!(IMM5 == 0); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6222,20 +5669,17 @@ pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let c = int32x2_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { + static_assert_uimm_bits!(IMM5, 1); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6244,20 +5688,17 @@ pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let c = int32x4_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { + static_assert_uimm_bits!(IMM5, 3); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6266,20 +5707,17 @@ pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - let c = int64x1_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { + static_assert_uimm_bits!(IMM5, 2); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6288,20 +5726,17 @@ pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let c = int64x2_t::splat(-1); - simd_or(simd_xor(b, c), a) +pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { + static_assert_uimm_bits!(IMM5, 3); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6310,20 +5745,17 @@ pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let c = int8x8_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { + static_assert_uimm_bits!(IMM5, 3); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6332,20 +5764,17 @@ pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let c = int8x16_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { + static_assert_uimm_bits!(IMM5, 2); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6354,20 +5783,17 @@ pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let c = int16x4_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { + static_assert_uimm_bits!(IMM5, 3); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6376,20 +5802,17 @@ pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let c = int16x8_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { + static_assert_uimm_bits!(IMM5, 3); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6398,20 +5821,17 @@ pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let c = int32x2_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { + static_assert_uimm_bits!(IMM5, 3); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6420,20 +5840,17 @@ pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let c = int32x4_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { + static_assert_uimm_bits!(IMM5, 4); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6442,20 +5859,17 @@ pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - let c = int64x1_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { + static_assert_uimm_bits!(IMM5, 4); + simd_extract!(v, IMM5 as u32) } -/// Vector bitwise inclusive OR NOT +/// Move vector element to general-purpose register #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6464,19 +5878,19 @@ pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let c = int64x2_t::splat(-1); - simd_or(simd_xor(b, transmute(c)), a) +pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { + static_assert_uimm_bits!(IMM5, 4); + simd_extract!(v, IMM5 as u32) } -/// Folding minimum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6486,18 +5900,18 @@ pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vpmins_v8i8(a, b) +pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t { + simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } -/// Folding minimum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6507,18 +5921,18 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - vpmins_v4i16(a, b) +pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t { + simd_shuffle!(a, a, [4, 5, 6, 7]) } -/// Folding minimum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6528,18 +5942,18 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - vpmins_v2i32(a, b) +pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t { + simd_shuffle!(a, a, [2, 3]) } -/// Folding minimum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6549,18 +5963,18 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - vpminu_v8i8(a, b) +pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { + int64x1_t([simd_extract!(a, 1)]) } -/// Folding minimum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6570,18 +5984,18 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - vpminu_v4i16(a, b) +pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { + simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } -/// Folding minimum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6591,18 +6005,18 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - vpminu_v2i32(a, b) +pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { + simd_shuffle!(a, a, [4, 5, 6, 7]) } -/// Folding minimum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6612,18 +6026,18 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - vpminf_v2f32(a, b) +pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { + simd_shuffle!(a, a, [2, 3]) } -/// Folding maximum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6633,18 +6047,18 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vpmaxs_v8i8(a, b) +pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { + uint64x1_t([simd_extract!(a, 1)]) } -/// Folding maximum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6654,18 +6068,18 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - vpmaxs_v4i16(a, b) +pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { + simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } -/// Folding maximum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6675,18 +6089,18 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - vpmaxs_v2i32(a, b) +pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { + simd_shuffle!(a, a, [4, 5, 6, 7]) } -/// Folding maximum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -6696,40 +6110,32 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - vpmaxu_v8i8(a, b) +pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t { + simd_shuffle!(a, a, [2, 3]) } -/// Folding maximum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "vget_low_s8", since = "1.60.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - vpmaxu_v4i16(a, b) +pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } -/// Folding maximum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6738,19 +6144,15 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - vpmaxu_v2i32(a, b) +pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { + simd_shuffle!(a, a, [0, 1, 2, 3]) } -/// Folding maximum of adjacent pairs +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxp) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6759,16 +6161,15 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - vpmaxf_v2f32(a, b) +pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { + simd_shuffle!(a, a, [0, 1]) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6777,17 +6178,15 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { - static_assert_uimm_bits!(IMM5, 1); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { + int64x1_t([simd_extract!(a, 0)]) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6796,17 +6195,15 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { - static_assert!(IMM5 == 0); - simd_extract!(v, 0) +pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6815,17 +6212,15 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { - static_assert_uimm_bits!(IMM5, 2); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { + simd_shuffle!(a, a, [0, 1, 2, 3]) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6834,17 +6229,15 @@ pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { - static_assert_uimm_bits!(IMM5, 2); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { + simd_shuffle!(a, a, [0, 1]) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6853,17 +6246,15 @@ pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { - static_assert_uimm_bits!(IMM5, 2); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { + uint64x1_t([simd_extract!(a, 0)]) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6872,17 +6263,15 @@ pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { - static_assert_uimm_bits!(IMM5, 1); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6891,17 +6280,15 @@ pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { - static_assert_uimm_bits!(IMM5, 1); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { + simd_shuffle!(a, a, [0, 1, 2, 3]) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6910,17 +6297,19 @@ pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { - static_assert_uimm_bits!(IMM5, 1); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t { + simd_shuffle!(a, a, [0, 1]) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6929,17 +6318,19 @@ pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { - static_assert_uimm_bits!(IMM5, 2); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t { + int8x16_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6948,17 +6339,19 @@ pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { - static_assert!(IMM5 == 0); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t { + int16x8_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6967,17 +6360,19 @@ pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { - static_assert_uimm_bits!(IMM5, 1); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t { + int32x4_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6986,17 +6381,19 @@ pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { - static_assert!(IMM5 == 0); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t { + int64x2_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7005,17 +6402,19 @@ pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { - static_assert_uimm_bits!(IMM5, 1); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t { + uint8x16_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7024,17 +6423,19 @@ pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { - static_assert_uimm_bits!(IMM5, 3); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t { + uint16x8_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7043,17 +6444,19 @@ pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { - static_assert_uimm_bits!(IMM5, 2); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t { + uint32x4_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7062,17 +6465,19 @@ pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { - static_assert_uimm_bits!(IMM5, 3); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t { + uint64x2_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7081,17 +6486,19 @@ pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { - static_assert_uimm_bits!(IMM5, 3); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t { + poly8x16_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7100,17 +6507,19 @@ pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { - static_assert_uimm_bits!(IMM5, 2); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t { + poly16x8_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7119,36 +6528,39 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { - static_assert_uimm_bits!(IMM5, 3); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t { + float32x4_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar +/// +/// Private vfp4 version used by FMA intriniscs because LLVM does +/// not inline the non-vfp4 version in vfp4 functions. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) )] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { - static_assert_uimm_bits!(IMM5, 3); - simd_extract!(v, IMM5 as u32) +unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { + float32x4_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7157,36 +6569,19 @@ pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { - static_assert_uimm_bits!(IMM5, 3); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t { + int8x8_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) )] -pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { - static_assert_uimm_bits!(IMM5, 4); - simd_extract!(v, IMM5 as u32) -} - -/// Move vector element to general-purpose register -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7195,17 +6590,19 @@ pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { - static_assert_uimm_bits!(IMM5, 4); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t { + int16x4_t::splat(value) } -/// Move vector element to general-purpose register +/// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7214,9 +6611,8 @@ pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { - static_assert_uimm_bits!(IMM5, 4); - simd_extract!(v, IMM5 as u32) +pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t { + int32x2_t::splat(value) } /// Duplicate vector element to vector or scalar @@ -7226,7 +6622,7 @@ pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(fmov) )] #[cfg_attr( not(target_arch = "arm"), @@ -7236,18 +6632,18 @@ pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t { - simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t { + int64x1_t::splat(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -7257,18 +6653,18 @@ pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t { - simd_shuffle!(a, a, [4, 5, 6, 7]) +pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t { + uint8x8_t::splat(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -7278,18 +6674,18 @@ pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t { - simd_shuffle!(a, a, [2, 3]) +pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t { + uint16x4_t::splat(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -7299,8 +6695,8 @@ pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { - int64x1_t([simd_extract!(a, 1)]) +pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t { + uint32x2_t::splat(value) } /// Duplicate vector element to vector or scalar @@ -7310,7 +6706,7 @@ pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(fmov) )] #[cfg_attr( not(target_arch = "arm"), @@ -7320,18 +6716,18 @@ pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { - simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t { + uint64x1_t::splat(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -7341,18 +6737,18 @@ pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { - simd_shuffle!(a, a, [4, 5, 6, 7]) +pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t { + poly8x8_t::splat(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -7362,18 +6758,18 @@ pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { - simd_shuffle!(a, a, [2, 3]) +pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t { + poly16x4_t::splat(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -7383,39 +6779,38 @@ pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { - uint64x1_t([simd_extract!(a, 1)]) +pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t { + float32x2_t::splat(value) } /// Duplicate vector element to vector or scalar +/// +/// Private vfp4 version used by FMA intriniscs because LLVM does +/// not inline the non-vfp4 version in vfp4 functions. #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { - simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) +unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { + float32x2_t::splat(value) } -/// Duplicate vector element to vector or scalar +/// Load SIMD&FP register (immediate offset) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -7425,18 +6820,18 @@ pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { - simd_shuffle!(a, a, [4, 5, 6, 7]) +pub unsafe fn vldrq_p128(a: *const p128) -> p128 { + *a } -/// Duplicate vector element to vector or scalar +/// Store SIMD&FP register (immediate offset) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -7446,32 +6841,40 @@ pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t { - simd_shuffle!(a, a, [2, 3]) +pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { + *a = b; } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "vget_low_s8", since = "1.60.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { - simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t { + vdup_n_s8(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7480,15 +6883,19 @@ pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { - simd_shuffle!(a, a, [0, 1, 2, 3]) +pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t { + vdup_n_s16(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7497,15 +6904,19 @@ pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { - simd_shuffle!(a, a, [0, 1]) +pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t { + vdup_n_s32(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmov) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7514,15 +6925,19 @@ pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { - int64x1_t([simd_extract!(a, 0)]) +pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t { + vdup_n_s64(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7531,15 +6946,19 @@ pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { - simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t { + vdup_n_u8(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7548,15 +6967,19 @@ pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { - simd_shuffle!(a, a, [0, 1, 2, 3]) +pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t { + vdup_n_u16(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7565,15 +6988,19 @@ pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { - simd_shuffle!(a, a, [0, 1]) +pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t { + vdup_n_u32(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmov) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7582,15 +7009,19 @@ pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { - uint64x1_t([simd_extract!(a, 0)]) +pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t { + vdup_n_u64(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7599,15 +7030,19 @@ pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { - simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t { + vdup_n_p8(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7616,15 +7051,19 @@ pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { - simd_shuffle!(a, a, [0, 1, 2, 3]) +pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t { + vdup_n_p16(value) } /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7633,8 +7072,8 @@ pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t { - simd_shuffle!(a, a, [0, 1]) +pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t { + vdup_n_f32(value) } /// Duplicate vector element to vector or scalar @@ -7654,8 +7093,8 @@ pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t { - int8x16_t::splat(value) +pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t { + vdupq_n_s8(value) } /// Duplicate vector element to vector or scalar @@ -7675,8 +7114,8 @@ pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t { - int16x8_t::splat(value) +pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t { + vdupq_n_s16(value) } /// Duplicate vector element to vector or scalar @@ -7696,9 +7135,9 @@ pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t { - int32x4_t::splat(value) -} +pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t { + vdupq_n_s32(value) +} /// Duplicate vector element to vector or scalar #[inline] @@ -7717,8 +7156,8 @@ pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t { - int64x2_t::splat(value) +pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t { + vdupq_n_s64(value) } /// Duplicate vector element to vector or scalar @@ -7738,8 +7177,8 @@ pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t { - uint8x16_t::splat(value) +pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t { + vdupq_n_u8(value) } /// Duplicate vector element to vector or scalar @@ -7759,8 +7198,8 @@ pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t { - uint16x8_t::splat(value) +pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t { + vdupq_n_u16(value) } /// Duplicate vector element to vector or scalar @@ -7780,8 +7219,8 @@ pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t { - uint32x4_t::splat(value) +pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t { + vdupq_n_u32(value) } /// Duplicate vector element to vector or scalar @@ -7801,8 +7240,8 @@ pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t { - uint64x2_t::splat(value) +pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t { + vdupq_n_u64(value) } /// Duplicate vector element to vector or scalar @@ -7822,8 +7261,8 @@ pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t { - poly8x16_t::splat(value) +pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t { + vdupq_n_p8(value) } /// Duplicate vector element to vector or scalar @@ -7843,8 +7282,8 @@ pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t { - poly16x8_t::splat(value) +pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t { + vdupq_n_p16(value) } /// Duplicate vector element to vector or scalar @@ -7864,39 +7303,43 @@ pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t { - float32x4_t::splat(value) +pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { + vdupq_n_f32(value) } -/// Duplicate vector element to vector or scalar -/// -/// Private vfp4 version used by FMA intriniscs because LLVM does -/// not inline the non-vfp4 version in vfp4 functions. +/// Extract vector from pair of vectors #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr("nop", N = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] -unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { - float32x4_t::splat(value) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { + static_assert!(N == 0); + a } -/// Duplicate vector element to vector or scalar +/// Extract vector from pair of vectors #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr("nop", N = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7905,18 +7348,19 @@ unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t { - int8x8_t::splat(value) +pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { + static_assert!(N == 0); + a } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -7926,18 +7370,18 @@ pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t { - int16x4_t::splat(value) +pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -7947,18 +7391,18 @@ pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t { - int32x2_t::splat(value) +pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -7968,18 +7412,18 @@ pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t { - int64x1_t::splat(value) +pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -7989,18 +7433,18 @@ pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t { - uint8x8_t::splat(value) +pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -8010,18 +7454,18 @@ pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t { - uint16x4_t::splat(value) +pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -8031,18 +7475,18 @@ pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t { - uint32x2_t::splat(value) +pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8052,18 +7496,18 @@ pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t { - uint64x1_t::splat(value) +pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8073,18 +7517,18 @@ pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t { - poly8x8_t::splat(value) +pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8094,18 +7538,18 @@ pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t { - poly16x4_t::splat(value) +pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8115,38 +7559,39 @@ pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t { - float32x2_t::splat(value) +pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } -/// Duplicate vector element to vector or scalar -/// -/// Private vfp4 version used by FMA intriniscs because LLVM does -/// not inline the non-vfp4 version in vfp4 functions. +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] -unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { - float32x2_t::splat(value) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t { + simd_shuffle!(a, a, [1, 0, 3, 2]) } -/// Load SIMD&FP register (immediate offset) +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8156,18 +7601,18 @@ unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vldrq_p128(a: *const p128) -> p128 { - *a +pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -/// Store SIMD&FP register (immediate offset) +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8177,18 +7622,18 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { - *a = b; +pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, a, [1, 0, 3, 2]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8198,18 +7643,18 @@ pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t { - vdup_n_s8(value) +pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8219,18 +7664,18 @@ pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t { - vdup_n_s16(value) +pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, a, [1, 0, 3, 2]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8240,18 +7685,18 @@ pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t { - vdup_n_s32(value) +pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8261,18 +7706,18 @@ pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t { - vdup_n_s64(value) +pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -8282,39 +7727,18 @@ pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t { - vdup_n_u8(value) -} - -/// Duplicate vector element to vector or scalar -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t { - vdup_n_u16(value) +pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8324,18 +7748,18 @@ pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t { - vdup_n_u32(value) +pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t { + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8345,18 +7769,18 @@ pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t { - vdup_n_u64(value) +pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t { + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8366,18 +7790,18 @@ pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t { - vdup_n_p8(value) +pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t { + simd_shuffle!(a, a, [3, 2, 1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8387,18 +7811,18 @@ pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t { - vdup_n_p16(value) +pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8408,18 +7832,18 @@ pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t { - vdup_n_f32(value) +pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t { + simd_shuffle!(a, a, [1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8429,18 +7853,18 @@ pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t { - vdupq_n_s8(value) +pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t { + simd_shuffle!(a, a, [1, 0, 3, 2]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8450,18 +7874,18 @@ pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t { - vdupq_n_s16(value) +pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8471,18 +7895,18 @@ pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t { - vdupq_n_s32(value) +pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8492,18 +7916,18 @@ pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t { - vdupq_n_s64(value) +pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, a, [3, 2, 1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8513,18 +7937,18 @@ pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t { - vdupq_n_u8(value) +pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8534,18 +7958,18 @@ pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t { - vdupq_n_u16(value) +pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, a, [1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8555,18 +7979,18 @@ pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t { - vdupq_n_u32(value) +pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, a, [1, 0, 3, 2]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8576,18 +8000,18 @@ pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t { - vdupq_n_u64(value) +pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t { + simd_shuffle!(a, a, [1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8597,18 +8021,18 @@ pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t { - vdupq_n_p8(value) +pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { + simd_shuffle!(a, a, [1, 0, 3, 2]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8618,18 +8042,18 @@ pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t { - vdupq_n_p16(value) +pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } -/// Duplicate vector element to vector or scalar +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -8639,20 +8063,19 @@ pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { - vdupq_n_f32(value) +pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } -/// Extract vector from pair of vectors +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("nop", N = 0) + assert_instr(rev64) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8661,21 +8084,19 @@ pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { - static_assert!(N == 0); - a +pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, a, [3, 2, 1, 0]) } -/// Extract vector from pair of vectors +/// Reversing vector elements (swap endianness) #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("nop", N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr("nop", N = 0) + assert_instr(rev64) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8684,3740 +8105,1656 @@ pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { - static_assert!(N == 0); - a +pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } -/// Population count per byte. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { - vcnt_s8_(a) -} -/// Population count per byte. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { - vcntq_s8_(a) -} -/// Population count per byte. +/* FIXME: 16-bit float +/// Vector combine #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { - transmute(vcnt_s8_(transmute(a))) +)] pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { + simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } -/// Population count per byte. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { - transmute(vcntq_s8_(transmute(a))) -} -/// Population count per byte. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { - transmute(vcnt_s8_(transmute(a))) -} -/// Population count per byte. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { - transmute(vcntq_s8_(transmute(a))) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t { - simd_shuffle!(a, a, [1, 0, 3, 2]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, a, [1, 0, 3, 2]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, a, [1, 0, 3, 2]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t { - simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t { - simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t { - simd_shuffle!(a, a, [3, 2, 1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t { - simd_shuffle!(a, a, [1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t { - simd_shuffle!(a, a, [1, 0, 3, 2]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { - simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, a, [3, 2, 1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, a, [1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, a, [1, 0, 3, 2]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t { - simd_shuffle!(a, a, [1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { - simd_shuffle!(a, a, [1, 0, 3, 2]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { - simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, a, [3, 2, 1, 0]) -} - -/// Reversing vector elements (swap endianness) -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) -} - -/// Signed Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadal_s8_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddl_s8_(b), a) - } -} - -/// Signed Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadal_s16_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddl_s16_(b), a) - } -} - -/// Signed Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadal_s32_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddl_s32_(b), a) - } -} - -/// Signed Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadalq_s8_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddlq_s8_(b), a) - } -} - -/// Signed Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s16))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadalq_s16_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddlq_s16_(b), a) - } -} - -/// Signed Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.s32))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadalq_s32_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddlq_s32_(b), a) - } -} - -/// Unsigned Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadal_u8_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddl_u8_(b), a) - } -} - -/// Unsigned Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadal_u16_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddl_u16_(b), a) - } -} - -/// Unsigned Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadal_u32_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddl_u32_(b), a) - } -} - -/// Unsigned Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadalq_u8_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddlq_u8_(b), a) - } -} - -/// Unsigned Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u16))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadalq_u16_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddlq_u16_(b), a) - } -} - -/// Unsigned Add and Accumulate Long Pairwise. -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadal.u32))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - #[cfg(target_arch = "arm")] - { - crate::core_arch::arm::neon::vpadalq_u32_(a, b) - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - simd_add(vpaddlq_u32_(b), a) - } -} - -/// 8-bit integer matrix multiply-accumulate -#[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smmla) -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" - )] - fn vmmlaq_s32_(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - vmmlaq_s32_(a, b, c) -} - -/// 8-bit integer matrix multiply-accumulate -#[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ummla) -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" - )] - fn vmmlaq_u32_(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; - } - vmmlaq_u32_(a, b, c) -} - -/// Unsigned and signed 8-bit integer matrix multiply-accumulate -#[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usmmla) -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - #[allow(improper_ctypes)] - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" - )] - fn vusmmlaq_s32_(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; - } - vusmmlaq_s32_(a, b, c) -} - -/* FIXME: 16-bit float -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { - simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} -*/ - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { - simd_shuffle!(low, high, [0, 1, 2, 3]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { - simd_shuffle!( - low, - high, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { - simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { - simd_shuffle!( - low, - high, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { - simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { - simd_shuffle!(low, high, [0, 1, 2, 3]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { - simd_shuffle!(low, high, [0, 1]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { - simd_shuffle!( - low, - high, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { - simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mov) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { - simd_shuffle!(low, high, [0, 1, 2, 3]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { - simd_shuffle!(low, high, [0, 1]) -} - -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { - simd_shuffle!(low, high, [0, 1]) -} - -#[cfg(test)] -mod tests { - use super::*; - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - use crate::core_arch::aarch64::*; - #[cfg(target_arch = "arm")] - use crate::core_arch::arm::*; - use crate::core_arch::arm_shared::test_support::*; - use crate::core_arch::simd::*; - use std::{mem::transmute, vec::Vec}; - use stdarch_test::simd_test; - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_s8() { - let a = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let elem: i8 = 42; - let e = i8x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: i8x8 = transmute(vld1_lane_s8::<7>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_s8() { - let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let elem: i8 = 42; - let e = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); - let r: i8x16 = transmute(vld1q_lane_s8::<15>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_s16() { - let a = i16x4::new(0, 1, 2, 3); - let elem: i16 = 42; - let e = i16x4::new(0, 1, 2, 42); - let r: i16x4 = transmute(vld1_lane_s16::<3>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_s16() { - let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let elem: i16 = 42; - let e = i16x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: i16x8 = transmute(vld1q_lane_s16::<7>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_s32() { - let a = i32x2::new(0, 1); - let elem: i32 = 42; - let e = i32x2::new(0, 42); - let r: i32x2 = transmute(vld1_lane_s32::<1>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_s32() { - let a = i32x4::new(0, 1, 2, 3); - let elem: i32 = 42; - let e = i32x4::new(0, 1, 2, 42); - let r: i32x4 = transmute(vld1q_lane_s32::<3>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_s64() { - let a = i64x1::new(0); - let elem: i64 = 42; - let e = i64x1::new(42); - let r: i64x1 = transmute(vld1_lane_s64::<0>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_s64() { - let a = i64x2::new(0, 1); - let elem: i64 = 42; - let e = i64x2::new(0, 42); - let r: i64x2 = transmute(vld1q_lane_s64::<1>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_u8() { - let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let elem: u8 = 42; - let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u8x8 = transmute(vld1_lane_u8::<7>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_u8() { - let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let elem: u8 = 42; - let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); - let r: u8x16 = transmute(vld1q_lane_u8::<15>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_u16() { - let a = u16x4::new(0, 1, 2, 3); - let elem: u16 = 42; - let e = u16x4::new(0, 1, 2, 42); - let r: u16x4 = transmute(vld1_lane_u16::<3>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_u16() { - let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let elem: u16 = 42; - let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u16x8 = transmute(vld1q_lane_u16::<7>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_u32() { - let a = u32x2::new(0, 1); - let elem: u32 = 42; - let e = u32x2::new(0, 42); - let r: u32x2 = transmute(vld1_lane_u32::<1>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_u32() { - let a = u32x4::new(0, 1, 2, 3); - let elem: u32 = 42; - let e = u32x4::new(0, 1, 2, 42); - let r: u32x4 = transmute(vld1q_lane_u32::<3>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_u64() { - let a = u64x1::new(0); - let elem: u64 = 42; - let e = u64x1::new(42); - let r: u64x1 = transmute(vld1_lane_u64::<0>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_u64() { - let a = u64x2::new(0, 1); - let elem: u64 = 42; - let e = u64x2::new(0, 42); - let r: u64x2 = transmute(vld1q_lane_u64::<1>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_p8() { - let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let elem: p8 = 42; - let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u8x8 = transmute(vld1_lane_p8::<7>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_p8() { - let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let elem: p8 = 42; - let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); - let r: u8x16 = transmute(vld1q_lane_p8::<15>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_p16() { - let a = u16x4::new(0, 1, 2, 3); - let elem: p16 = 42; - let e = u16x4::new(0, 1, 2, 42); - let r: u16x4 = transmute(vld1_lane_p16::<3>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_p16() { - let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let elem: p16 = 42; - let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42); - let r: u16x8 = transmute(vld1q_lane_p16::<7>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon,aes")] - unsafe fn test_vld1_lane_p64() { - let a = u64x1::new(0); - let elem: u64 = 42; - let e = u64x1::new(42); - let r: u64x1 = transmute(vld1_lane_p64::<0>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon,aes")] - unsafe fn test_vld1q_lane_p64() { - let a = u64x2::new(0, 1); - let elem: u64 = 42; - let e = u64x2::new(0, 42); - let r: u64x2 = transmute(vld1q_lane_p64::<1>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_lane_f32() { - let a = f32x2::new(0., 1.); - let elem: f32 = 42.; - let e = f32x2::new(0., 42.); - let r: f32x2 = transmute(vld1_lane_f32::<1>(&elem, transmute(a))); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_lane_f32() { - let a = f32x4::new(0., 1., 2., 3.); - let elem: f32 = 42.; - let e = f32x4::new(0., 1., 2., 42.); - let r: f32x4 = transmute(vld1q_lane_f32::<3>(&elem, transmute(a))); - assert_eq!(r, e) - } +*/ + +#[cfg(test)] +mod tests { + use super::*; + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + use crate::core_arch::aarch64::*; + #[cfg(target_arch = "arm")] + use crate::core_arch::arm::*; + use crate::core_arch::arm_shared::test_support::*; + use crate::core_arch::simd::*; + use std::{mem::transmute, vec::Vec}; + use stdarch_test::simd_test; #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_s8() { + unsafe fn test_vld1_lane_s8() { + let a = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: i8 = 42; - let e = i8x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let r: i8x8 = transmute(vld1_dup_s8(&elem)); + let e = i8x8::new(0, 1, 2, 3, 4, 5, 6, 42); + let r: i8x8 = transmute(vld1_lane_s8::<7>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_s8() { + unsafe fn test_vld1q_lane_s8() { + let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let elem: i8 = 42; - let e = i8x16::new( - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - ); - let r: i8x16 = transmute(vld1q_dup_s8(&elem)); + let e = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); + let r: i8x16 = transmute(vld1q_lane_s8::<15>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_s16() { + unsafe fn test_vld1_lane_s16() { + let a = i16x4::new(0, 1, 2, 3); let elem: i16 = 42; - let e = i16x4::new(42, 42, 42, 42); - let r: i16x4 = transmute(vld1_dup_s16(&elem)); + let e = i16x4::new(0, 1, 2, 42); + let r: i16x4 = transmute(vld1_lane_s16::<3>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_s16() { + unsafe fn test_vld1q_lane_s16() { + let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: i16 = 42; - let e = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let r: i16x8 = transmute(vld1q_dup_s16(&elem)); + let e = i16x8::new(0, 1, 2, 3, 4, 5, 6, 42); + let r: i16x8 = transmute(vld1q_lane_s16::<7>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_s32() { + unsafe fn test_vld1_lane_s32() { + let a = i32x2::new(0, 1); let elem: i32 = 42; - let e = i32x2::new(42, 42); - let r: i32x2 = transmute(vld1_dup_s32(&elem)); + let e = i32x2::new(0, 42); + let r: i32x2 = transmute(vld1_lane_s32::<1>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_s32() { + unsafe fn test_vld1q_lane_s32() { + let a = i32x4::new(0, 1, 2, 3); let elem: i32 = 42; - let e = i32x4::new(42, 42, 42, 42); - let r: i32x4 = transmute(vld1q_dup_s32(&elem)); + let e = i32x4::new(0, 1, 2, 42); + let r: i32x4 = transmute(vld1q_lane_s32::<3>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_s64() { + unsafe fn test_vld1_lane_s64() { + let a = i64x1::new(0); let elem: i64 = 42; let e = i64x1::new(42); - let r: i64x1 = transmute(vld1_dup_s64(&elem)); + let r: i64x1 = transmute(vld1_lane_s64::<0>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_s64() { + unsafe fn test_vld1q_lane_s64() { + let a = i64x2::new(0, 1); let elem: i64 = 42; - let e = i64x2::new(42, 42); - let r: i64x2 = transmute(vld1q_dup_s64(&elem)); + let e = i64x2::new(0, 42); + let r: i64x2 = transmute(vld1q_lane_s64::<1>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_u8() { + unsafe fn test_vld1_lane_u8() { + let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: u8 = 42; - let e = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let r: u8x8 = transmute(vld1_dup_u8(&elem)); + let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42); + let r: u8x8 = transmute(vld1_lane_u8::<7>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_u8() { + unsafe fn test_vld1q_lane_u8() { + let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let elem: u8 = 42; - let e = u8x16::new( - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - ); - let r: u8x16 = transmute(vld1q_dup_u8(&elem)); + let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); + let r: u8x16 = transmute(vld1q_lane_u8::<15>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_u16() { + unsafe fn test_vld1_lane_u16() { + let a = u16x4::new(0, 1, 2, 3); let elem: u16 = 42; - let e = u16x4::new(42, 42, 42, 42); - let r: u16x4 = transmute(vld1_dup_u16(&elem)); + let e = u16x4::new(0, 1, 2, 42); + let r: u16x4 = transmute(vld1_lane_u16::<3>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_u16() { + unsafe fn test_vld1q_lane_u16() { + let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: u16 = 42; - let e = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let r: u16x8 = transmute(vld1q_dup_u16(&elem)); + let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42); + let r: u16x8 = transmute(vld1q_lane_u16::<7>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_u32() { + unsafe fn test_vld1_lane_u32() { + let a = u32x2::new(0, 1); let elem: u32 = 42; - let e = u32x2::new(42, 42); - let r: u32x2 = transmute(vld1_dup_u32(&elem)); + let e = u32x2::new(0, 42); + let r: u32x2 = transmute(vld1_lane_u32::<1>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_u32() { + unsafe fn test_vld1q_lane_u32() { + let a = u32x4::new(0, 1, 2, 3); let elem: u32 = 42; - let e = u32x4::new(42, 42, 42, 42); - let r: u32x4 = transmute(vld1q_dup_u32(&elem)); + let e = u32x4::new(0, 1, 2, 42); + let r: u32x4 = transmute(vld1q_lane_u32::<3>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_u64() { + unsafe fn test_vld1_lane_u64() { + let a = u64x1::new(0); let elem: u64 = 42; let e = u64x1::new(42); - let r: u64x1 = transmute(vld1_dup_u64(&elem)); + let r: u64x1 = transmute(vld1_lane_u64::<0>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_u64() { + unsafe fn test_vld1q_lane_u64() { + let a = u64x2::new(0, 1); let elem: u64 = 42; - let e = u64x2::new(42, 42); - let r: u64x2 = transmute(vld1q_dup_u64(&elem)); + let e = u64x2::new(0, 42); + let r: u64x2 = transmute(vld1q_lane_u64::<1>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_p8() { + unsafe fn test_vld1_lane_p8() { + let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); let elem: p8 = 42; - let e = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let r: u8x8 = transmute(vld1_dup_p8(&elem)); + let e = u8x8::new(0, 1, 2, 3, 4, 5, 6, 42); + let r: u8x8 = transmute(vld1_lane_p8::<7>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_p8() { + unsafe fn test_vld1q_lane_p8() { + let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let elem: p8 = 42; - let e = u8x16::new( - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - ); - let r: u8x16 = transmute(vld1q_dup_p8(&elem)); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_p16() { - let elem: p16 = 42; - let e = u16x4::new(42, 42, 42, 42); - let r: u16x4 = transmute(vld1_dup_p16(&elem)); + let e = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 42); + let r: u8x16 = transmute(vld1q_lane_p8::<15>(&elem, transmute(a))); assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_p16() { + unsafe fn test_vld1_lane_p16() { + let a = u16x4::new(0, 1, 2, 3); let elem: p16 = 42; - let e = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let r: u16x8 = transmute(vld1q_dup_p16(&elem)); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon,aes")] - unsafe fn test_vld1_dup_p64() { - let elem: u64 = 42; - let e = u64x1::new(42); - let r: u64x1 = transmute(vld1_dup_p64(&elem)); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon,aes")] - unsafe fn test_vld1q_dup_p64() { - let elem: u64 = 42; - let e = u64x2::new(42, 42); - let r: u64x2 = transmute(vld1q_dup_p64(&elem)); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1_dup_f32() { - let elem: f32 = 42.; - let e = f32x2::new(42., 42.); - let r: f32x2 = transmute(vld1_dup_f32(&elem)); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vld1q_dup_f32() { - let elem: f32 = 42.; - let e = f32x4::new(42., 42., 42., 42.); - let r: f32x4 = transmute(vld1q_dup_f32(&elem)); - assert_eq!(r, e) - } - - #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_u8() { - let v = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r = vget_lane_u8::<1>(transmute(v)); - assert_eq!(r, 2); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_u32() { - let v = i32x4::new(1, 2, 3, 4); - let r = vgetq_lane_u32::<1>(transmute(v)); - assert_eq!(r, 2); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_s32() { - let v = i32x4::new(1, 2, 3, 4); - let r = vgetq_lane_s32::<1>(transmute(v)); - assert_eq!(r, 2); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_u64() { - let v: u64 = 1; - let r = vget_lane_u64::<0>(transmute(v)); - assert_eq!(r, 1); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_u16() { - let v = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r = vgetq_lane_u16::<1>(transmute(v)); - assert_eq!(r, 2); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_s8() { - let v = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let r = vget_lane_s8::<2>(transmute(v)); - assert_eq!(r, 2); - let r = vget_lane_s8::<4>(transmute(v)); - assert_eq!(r, 4); - let r = vget_lane_s8::<5>(transmute(v)); - assert_eq!(r, 5); - } - #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_p8() { - let v = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let r = vget_lane_p8::<2>(transmute(v)); - assert_eq!(r, 2); - let r = vget_lane_p8::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vget_lane_p8::<5>(transmute(v)); - assert_eq!(r, 5); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_p16() { - let v = u16x4::new(0, 1, 2, 3); - let r = vget_lane_p16::<2>(transmute(v)); - assert_eq!(r, 2); - let r = vget_lane_p16::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vget_lane_p16::<0>(transmute(v)); - assert_eq!(r, 0); - let r = vget_lane_p16::<1>(transmute(v)); - assert_eq!(r, 1); + let e = u16x4::new(0, 1, 2, 42); + let r: u16x4 = transmute(vld1_lane_p16::<3>(&elem, transmute(a))); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_s16() { - let v = i16x4::new(0, 1, 2, 3); - let r = vget_lane_s16::<2>(transmute(v)); - assert_eq!(r, 2); - let r = vget_lane_s16::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vget_lane_s16::<0>(transmute(v)); - assert_eq!(r, 0); - let r = vget_lane_s16::<1>(transmute(v)); - assert_eq!(r, 1); + unsafe fn test_vld1q_lane_p16() { + let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); + let elem: p16 = 42; + let e = u16x8::new(0, 1, 2, 3, 4, 5, 6, 42); + let r: u16x8 = transmute(vld1q_lane_p16::<7>(&elem, transmute(a))); + assert_eq!(r, e) } - #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_u16() { - let v = u16x4::new(0, 1, 2, 3); - let r = vget_lane_u16::<2>(transmute(v)); - assert_eq!(r, 2); - let r = vget_lane_u16::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vget_lane_u16::<0>(transmute(v)); - assert_eq!(r, 0); - let r = vget_lane_u16::<1>(transmute(v)); - assert_eq!(r, 1); + #[simd_test(enable = "neon,aes")] + unsafe fn test_vld1_lane_p64() { + let a = u64x1::new(0); + let elem: u64 = 42; + let e = u64x1::new(42); + let r: u64x1 = transmute(vld1_lane_p64::<0>(&elem, transmute(a))); + assert_eq!(r, e) } - #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_f32() { - let v = f32x2::new(0.0, 1.0); - let r = vget_lane_f32::<1>(transmute(v)); - assert_eq!(r, 1.0); - let r = vget_lane_f32::<0>(transmute(v)); - assert_eq!(r, 0.0); + + #[simd_test(enable = "neon,aes")] + unsafe fn test_vld1q_lane_p64() { + let a = u64x2::new(0, 1); + let elem: u64 = 42; + let e = u64x2::new(0, 42); + let r: u64x2 = transmute(vld1q_lane_p64::<1>(&elem, transmute(a))); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_s32() { - let v = i32x2::new(0, 1); - let r = vget_lane_s32::<1>(transmute(v)); - assert_eq!(r, 1); - let r = vget_lane_s32::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1_lane_f32() { + let a = f32x2::new(0., 1.); + let elem: f32 = 42.; + let e = f32x2::new(0., 42.); + let r: f32x2 = transmute(vld1_lane_f32::<1>(&elem, transmute(a))); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_u32() { - let v = u32x2::new(0, 1); - let r = vget_lane_u32::<1>(transmute(v)); - assert_eq!(r, 1); - let r = vget_lane_u32::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1q_lane_f32() { + let a = f32x4::new(0., 1., 2., 3.); + let elem: f32 = 42.; + let e = f32x4::new(0., 1., 2., 42.); + let r: f32x4 = transmute(vld1q_lane_f32::<3>(&elem, transmute(a))); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_s64() { - let v = i64x1::new(1); - let r = vget_lane_s64::<0>(transmute(v)); - assert_eq!(r, 1); + unsafe fn test_vld1_dup_s8() { + let elem: i8 = 42; + let e = i8x8::new(42, 42, 42, 42, 42, 42, 42, 42); + let r: i8x8 = transmute(vld1_dup_s8(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_lane_p64() { - let v = u64x1::new(1); - let r = vget_lane_p64::<0>(transmute(v)); - assert_eq!(r, 1); + unsafe fn test_vld1q_dup_s8() { + let elem: i8 = 42; + let e = i8x16::new( + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + ); + let r: i8x16 = transmute(vld1q_dup_s8(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_s8() { - let v = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let r = vgetq_lane_s8::<7>(transmute(v)); - assert_eq!(r, 7); - let r = vgetq_lane_s8::<13>(transmute(v)); - assert_eq!(r, 13); - let r = vgetq_lane_s8::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vgetq_lane_s8::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1_dup_s16() { + let elem: i16 = 42; + let e = i16x4::new(42, 42, 42, 42); + let r: i16x4 = transmute(vld1_dup_s16(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_p8() { - let v = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let r = vgetq_lane_p8::<7>(transmute(v)); - assert_eq!(r, 7); - let r = vgetq_lane_p8::<13>(transmute(v)); - assert_eq!(r, 13); - let r = vgetq_lane_p8::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vgetq_lane_p8::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1q_dup_s16() { + let elem: i16 = 42; + let e = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42); + let r: i16x8 = transmute(vld1q_dup_s16(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_u8() { - let v = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let r = vgetq_lane_u8::<7>(transmute(v)); - assert_eq!(r, 7); - let r = vgetq_lane_u8::<13>(transmute(v)); - assert_eq!(r, 13); - let r = vgetq_lane_u8::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vgetq_lane_u8::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1_dup_s32() { + let elem: i32 = 42; + let e = i32x2::new(42, 42); + let r: i32x2 = transmute(vld1_dup_s32(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_s16() { - let v = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let r = vgetq_lane_s16::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vgetq_lane_s16::<6>(transmute(v)); - assert_eq!(r, 6); - let r = vgetq_lane_s16::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1q_dup_s32() { + let elem: i32 = 42; + let e = i32x4::new(42, 42, 42, 42); + let r: i32x4 = transmute(vld1q_dup_s32(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_p16() { - let v = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let r = vgetq_lane_p16::<3>(transmute(v)); - assert_eq!(r, 3); - let r = vgetq_lane_p16::<7>(transmute(v)); - assert_eq!(r, 7); - let r = vgetq_lane_p16::<1>(transmute(v)); - assert_eq!(r, 1); + unsafe fn test_vld1_dup_s64() { + let elem: i64 = 42; + let e = i64x1::new(42); + let r: i64x1 = transmute(vld1_dup_s64(&elem)); + assert_eq!(r, e) } + #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_f32() { - let v = f32x4::new(0.0, 1.0, 2.0, 3.0); - let r = vgetq_lane_f32::<3>(transmute(v)); - assert_eq!(r, 3.0); - let r = vgetq_lane_f32::<0>(transmute(v)); - assert_eq!(r, 0.0); - let r = vgetq_lane_f32::<2>(transmute(v)); - assert_eq!(r, 2.0); - let r = vgetq_lane_f32::<1>(transmute(v)); - assert_eq!(r, 1.0); + unsafe fn test_vld1q_dup_s64() { + let elem: i64 = 42; + let e = i64x2::new(42, 42); + let r: i64x2 = transmute(vld1q_dup_s64(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_s64() { - let v = i64x2::new(0, 1); - let r = vgetq_lane_s64::<1>(transmute(v)); - assert_eq!(r, 1); - let r = vgetq_lane_s64::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1_dup_u8() { + let elem: u8 = 42; + let e = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42); + let r: u8x8 = transmute(vld1_dup_u8(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_p64() { - let v = u64x2::new(0, 1); - let r = vgetq_lane_p64::<1>(transmute(v)); - assert_eq!(r, 1); - let r = vgetq_lane_p64::<0>(transmute(v)); - assert_eq!(r, 0); + unsafe fn test_vld1q_dup_u8() { + let elem: u8 = 42; + let e = u8x16::new( + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + ); + let r: u8x16 = transmute(vld1q_dup_u8(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vext_s64() { - let a: i64x1 = i64x1::new(0); - let b: i64x1 = i64x1::new(1); - let e: i64x1 = i64x1::new(0); - let r: i64x1 = transmute(vext_s64::<0>(transmute(a), transmute(b))); - assert_eq!(r, e); + unsafe fn test_vld1_dup_u16() { + let elem: u16 = 42; + let e = u16x4::new(42, 42, 42, 42); + let r: u16x4 = transmute(vld1_dup_u16(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vext_u64() { - let a: u64x1 = u64x1::new(0); - let b: u64x1 = u64x1::new(1); - let e: u64x1 = u64x1::new(0); - let r: u64x1 = transmute(vext_u64::<0>(transmute(a), transmute(b))); - assert_eq!(r, e); + unsafe fn test_vld1q_dup_u16() { + let elem: u16 = 42; + let e = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42); + let r: u16x8 = transmute(vld1q_dup_u16(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_s8() { - let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let e = i8x8::new(9, 10, 11, 12, 13, 14, 15, 16); - let r: i8x8 = transmute(vget_high_s8(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1_dup_u32() { + let elem: u32 = 42; + let e = u32x2::new(42, 42); + let r: u32x2 = transmute(vld1_dup_u32(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_s16() { - let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let e = i16x4::new(5, 6, 7, 8); - let r: i16x4 = transmute(vget_high_s16(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1q_dup_u32() { + let elem: u32 = 42; + let e = u32x4::new(42, 42, 42, 42); + let r: u32x4 = transmute(vld1q_dup_u32(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_s32() { - let a = i32x4::new(1, 2, 3, 4); - let e = i32x2::new(3, 4); - let r: i32x2 = transmute(vget_high_s32(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1_dup_u64() { + let elem: u64 = 42; + let e = u64x1::new(42); + let r: u64x1 = transmute(vld1_dup_u64(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_s64() { - let a = i64x2::new(1, 2); - let e = i64x1::new(2); - let r: i64x1 = transmute(vget_high_s64(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1q_dup_u64() { + let elem: u64 = 42; + let e = u64x2::new(42, 42); + let r: u64x2 = transmute(vld1q_dup_u64(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_u8() { - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let e = u8x8::new(9, 10, 11, 12, 13, 14, 15, 16); - let r: u8x8 = transmute(vget_high_u8(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1_dup_p8() { + let elem: p8 = 42; + let e = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42); + let r: u8x8 = transmute(vld1_dup_p8(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_u16() { - let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let e = u16x4::new(5, 6, 7, 8); - let r: u16x4 = transmute(vget_high_u16(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1q_dup_p8() { + let elem: p8 = 42; + let e = u8x16::new( + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + ); + let r: u8x16 = transmute(vld1q_dup_p8(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_u32() { - let a = u32x4::new(1, 2, 3, 4); - let e = u32x2::new(3, 4); - let r: u32x2 = transmute(vget_high_u32(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1_dup_p16() { + let elem: p16 = 42; + let e = u16x4::new(42, 42, 42, 42); + let r: u16x4 = transmute(vld1_dup_p16(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_u64() { - let a = u64x2::new(1, 2); - let e = u64x1::new(2); - let r: u64x1 = transmute(vget_high_u64(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1q_dup_p16() { + let elem: p16 = 42; + let e = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42); + let r: u16x8 = transmute(vld1q_dup_p16(&elem)); + assert_eq!(r, e) } - #[simd_test(enable = "neon")] - unsafe fn test_vget_high_p8() { - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let e = u8x8::new(9, 10, 11, 12, 13, 14, 15, 16); - let r: u8x8 = transmute(vget_high_p8(transmute(a))); - assert_eq!(r, e); + #[simd_test(enable = "neon,aes")] + unsafe fn test_vld1_dup_p64() { + let elem: u64 = 42; + let e = u64x1::new(42); + let r: u64x1 = transmute(vld1_dup_p64(&elem)); + assert_eq!(r, e) } - #[simd_test(enable = "neon")] - unsafe fn test_vget_high_p16() { - let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let e = u16x4::new(5, 6, 7, 8); - let r: u16x4 = transmute(vget_high_p16(transmute(a))); - assert_eq!(r, e); + #[simd_test(enable = "neon,aes")] + unsafe fn test_vld1q_dup_p64() { + let elem: u64 = 42; + let e = u64x2::new(42, 42); + let r: u64x2 = transmute(vld1q_dup_p64(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_high_f32() { - let a = f32x4::new(1.0, 2.0, 3.0, 4.0); - let e = f32x2::new(3.0, 4.0); - let r: f32x2 = transmute(vget_high_f32(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1_dup_f32() { + let elem: f32 = 42.; + let e = f32x2::new(42., 42.); + let r: f32x2 = transmute(vld1_dup_f32(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_s8() { - let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let e = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r: i8x8 = transmute(vget_low_s8(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vld1q_dup_f32() { + let elem: f32 = 42.; + let e = f32x4::new(42., 42., 42., 42.); + let r: f32x4 = transmute(vld1q_dup_f32(&elem)); + assert_eq!(r, e) } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_s16() { - let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let e = i16x4::new(1, 2, 3, 4); - let r: i16x4 = transmute(vget_low_s16(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vget_lane_u8() { + let v = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let r = vget_lane_u8::<1>(transmute(v)); + assert_eq!(r, 2); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_s32() { - let a = i32x4::new(1, 2, 3, 4); - let e = i32x2::new(1, 2); - let r: i32x2 = transmute(vget_low_s32(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_u32() { + let v = i32x4::new(1, 2, 3, 4); + let r = vgetq_lane_u32::<1>(transmute(v)); + assert_eq!(r, 2); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_s64() { - let a = i64x2::new(1, 2); - let e = i64x1::new(1); - let r: i64x1 = transmute(vget_low_s64(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_s32() { + let v = i32x4::new(1, 2, 3, 4); + let r = vgetq_lane_s32::<1>(transmute(v)); + assert_eq!(r, 2); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_u8() { - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let e = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r: u8x8 = transmute(vget_low_u8(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vget_lane_u64() { + let v: u64 = 1; + let r = vget_lane_u64::<0>(transmute(v)); + assert_eq!(r, 1); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_u16() { - let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let e = u16x4::new(1, 2, 3, 4); - let r: u16x4 = transmute(vget_low_u16(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_u16() { + let v = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let r = vgetq_lane_u16::<1>(transmute(v)); + assert_eq!(r, 2); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_u32() { - let a = u32x4::new(1, 2, 3, 4); - let e = u32x2::new(1, 2); - let r: u32x2 = transmute(vget_low_u32(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vget_lane_s8() { + let v = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7); + let r = vget_lane_s8::<2>(transmute(v)); + assert_eq!(r, 2); + let r = vget_lane_s8::<4>(transmute(v)); + assert_eq!(r, 4); + let r = vget_lane_s8::<5>(transmute(v)); + assert_eq!(r, 5); } - #[simd_test(enable = "neon")] - unsafe fn test_vget_low_u64() { - let a = u64x2::new(1, 2); - let e = u64x1::new(1); - let r: u64x1 = transmute(vget_low_u64(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vget_lane_p8() { + let v = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7); + let r = vget_lane_p8::<2>(transmute(v)); + assert_eq!(r, 2); + let r = vget_lane_p8::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vget_lane_p8::<5>(transmute(v)); + assert_eq!(r, 5); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_p8() { - let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let e = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let r: u8x8 = transmute(vget_low_p8(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vget_lane_p16() { + let v = u16x4::new(0, 1, 2, 3); + let r = vget_lane_p16::<2>(transmute(v)); + assert_eq!(r, 2); + let r = vget_lane_p16::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vget_lane_p16::<0>(transmute(v)); + assert_eq!(r, 0); + let r = vget_lane_p16::<1>(transmute(v)); + assert_eq!(r, 1); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_p16() { - let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let e = u16x4::new(1, 2, 3, 4); - let r: u16x4 = transmute(vget_low_p16(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vget_lane_s16() { + let v = i16x4::new(0, 1, 2, 3); + let r = vget_lane_s16::<2>(transmute(v)); + assert_eq!(r, 2); + let r = vget_lane_s16::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vget_lane_s16::<0>(transmute(v)); + assert_eq!(r, 0); + let r = vget_lane_s16::<1>(transmute(v)); + assert_eq!(r, 1); } #[simd_test(enable = "neon")] - unsafe fn test_vget_low_f32() { - let a = f32x4::new(1.0, 2.0, 3.0, 4.0); - let e = f32x2::new(1.0, 2.0); - let r: f32x2 = transmute(vget_low_f32(transmute(a))); - assert_eq!(r, e); + unsafe fn test_vget_lane_u16() { + let v = u16x4::new(0, 1, 2, 3); + let r = vget_lane_u16::<2>(transmute(v)); + assert_eq!(r, 2); + let r = vget_lane_u16::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vget_lane_u16::<0>(transmute(v)); + assert_eq!(r, 0); + let r = vget_lane_u16::<1>(transmute(v)); + assert_eq!(r, 1); + } + #[simd_test(enable = "neon")] + unsafe fn test_vget_lane_f32() { + let v = f32x2::new(0.0, 1.0); + let r = vget_lane_f32::<1>(transmute(v)); + assert_eq!(r, 1.0); + let r = vget_lane_f32::<0>(transmute(v)); + assert_eq!(r, 0.0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_s8() { - let v: i8 = 42; - let e = i8x16::new( - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - ); - let r: i8x16 = transmute(vdupq_n_s8(v)); - assert_eq!(r, e); + unsafe fn test_vget_lane_s32() { + let v = i32x2::new(0, 1); + let r = vget_lane_s32::<1>(transmute(v)); + assert_eq!(r, 1); + let r = vget_lane_s32::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_s16() { - let v: i16 = 64; - let e = i16x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: i16x8 = transmute(vdupq_n_s16(v)); - assert_eq!(r, e); + unsafe fn test_vget_lane_u32() { + let v = u32x2::new(0, 1); + let r = vget_lane_u32::<1>(transmute(v)); + assert_eq!(r, 1); + let r = vget_lane_u32::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_s32() { - let v: i32 = 64; - let e = i32x4::new(64, 64, 64, 64); - let r: i32x4 = transmute(vdupq_n_s32(v)); - assert_eq!(r, e); + unsafe fn test_vget_lane_s64() { + let v = i64x1::new(1); + let r = vget_lane_s64::<0>(transmute(v)); + assert_eq!(r, 1); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_s64() { - let v: i64 = 64; - let e = i64x2::new(64, 64); - let r: i64x2 = transmute(vdupq_n_s64(v)); - assert_eq!(r, e); + unsafe fn test_vget_lane_p64() { + let v = u64x1::new(1); + let r = vget_lane_p64::<0>(transmute(v)); + assert_eq!(r, 1); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_u8() { - let v: u8 = 64; - let e = u8x16::new( - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - ); - let r: u8x16 = transmute(vdupq_n_u8(v)); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_s8() { + let v = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + let r = vgetq_lane_s8::<7>(transmute(v)); + assert_eq!(r, 7); + let r = vgetq_lane_s8::<13>(transmute(v)); + assert_eq!(r, 13); + let r = vgetq_lane_s8::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vgetq_lane_s8::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_u16() { - let v: u16 = 64; - let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u16x8 = transmute(vdupq_n_u16(v)); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_p8() { + let v = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + let r = vgetq_lane_p8::<7>(transmute(v)); + assert_eq!(r, 7); + let r = vgetq_lane_p8::<13>(transmute(v)); + assert_eq!(r, 13); + let r = vgetq_lane_p8::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vgetq_lane_p8::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_u32() { - let v: u32 = 64; - let e = u32x4::new(64, 64, 64, 64); - let r: u32x4 = transmute(vdupq_n_u32(v)); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_u8() { + let v = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + let r = vgetq_lane_u8::<7>(transmute(v)); + assert_eq!(r, 7); + let r = vgetq_lane_u8::<13>(transmute(v)); + assert_eq!(r, 13); + let r = vgetq_lane_u8::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vgetq_lane_u8::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_u64() { - let v: u64 = 64; - let e = u64x2::new(64, 64); - let r: u64x2 = transmute(vdupq_n_u64(v)); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_s16() { + let v = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); + let r = vgetq_lane_s16::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vgetq_lane_s16::<6>(transmute(v)); + assert_eq!(r, 6); + let r = vgetq_lane_s16::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_p8() { - let v: p8 = 64; - let e = u8x16::new( - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - ); - let r: u8x16 = transmute(vdupq_n_p8(v)); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_p16() { + let v = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); + let r = vgetq_lane_p16::<3>(transmute(v)); + assert_eq!(r, 3); + let r = vgetq_lane_p16::<7>(transmute(v)); + assert_eq!(r, 7); + let r = vgetq_lane_p16::<1>(transmute(v)); + assert_eq!(r, 1); + } + #[simd_test(enable = "neon")] + unsafe fn test_vgetq_lane_f32() { + let v = f32x4::new(0.0, 1.0, 2.0, 3.0); + let r = vgetq_lane_f32::<3>(transmute(v)); + assert_eq!(r, 3.0); + let r = vgetq_lane_f32::<0>(transmute(v)); + assert_eq!(r, 0.0); + let r = vgetq_lane_f32::<2>(transmute(v)); + assert_eq!(r, 2.0); + let r = vgetq_lane_f32::<1>(transmute(v)); + assert_eq!(r, 1.0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_p16() { - let v: p16 = 64; - let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u16x8 = transmute(vdupq_n_p16(v)); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_s64() { + let v = i64x2::new(0, 1); + let r = vgetq_lane_s64::<1>(transmute(v)); + assert_eq!(r, 1); + let r = vgetq_lane_s64::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdupq_n_f32() { - let v: f32 = 64.0; - let e = f32x4::new(64.0, 64.0, 64.0, 64.0); - let r: f32x4 = transmute(vdupq_n_f32(v)); - assert_eq!(r, e); + unsafe fn test_vgetq_lane_p64() { + let v = u64x2::new(0, 1); + let r = vgetq_lane_p64::<1>(transmute(v)); + assert_eq!(r, 1); + let r = vgetq_lane_p64::<0>(transmute(v)); + assert_eq!(r, 0); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_s8() { - let v: i8 = 64; - let e = i8x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: i8x8 = transmute(vdup_n_s8(v)); + unsafe fn test_vext_s64() { + let a: i64x1 = i64x1::new(0); + let b: i64x1 = i64x1::new(1); + let e: i64x1 = i64x1::new(0); + let r: i64x1 = transmute(vext_s64::<0>(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_s16() { - let v: i16 = 64; - let e = i16x4::new(64, 64, 64, 64); - let r: i16x4 = transmute(vdup_n_s16(v)); + unsafe fn test_vext_u64() { + let a: u64x1 = u64x1::new(0); + let b: u64x1 = u64x1::new(1); + let e: u64x1 = u64x1::new(0); + let r: u64x1 = transmute(vext_u64::<0>(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_s32() { - let v: i32 = 64; - let e = i32x2::new(64, 64); - let r: i32x2 = transmute(vdup_n_s32(v)); + unsafe fn test_vget_high_s8() { + let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let e = i8x8::new(9, 10, 11, 12, 13, 14, 15, 16); + let r: i8x8 = transmute(vget_high_s8(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_s64() { - let v: i64 = 64; - let e = i64x1::new(64); - let r: i64x1 = transmute(vdup_n_s64(v)); + unsafe fn test_vget_high_s16() { + let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let e = i16x4::new(5, 6, 7, 8); + let r: i16x4 = transmute(vget_high_s16(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_u8() { - let v: u8 = 64; - let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u8x8 = transmute(vdup_n_u8(v)); + unsafe fn test_vget_high_s32() { + let a = i32x4::new(1, 2, 3, 4); + let e = i32x2::new(3, 4); + let r: i32x2 = transmute(vget_high_s32(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_u16() { - let v: u16 = 64; - let e = u16x4::new(64, 64, 64, 64); - let r: u16x4 = transmute(vdup_n_u16(v)); + unsafe fn test_vget_high_s64() { + let a = i64x2::new(1, 2); + let e = i64x1::new(2); + let r: i64x1 = transmute(vget_high_s64(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_u32() { - let v: u32 = 64; - let e = u32x2::new(64, 64); - let r: u32x2 = transmute(vdup_n_u32(v)); + unsafe fn test_vget_high_u8() { + let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let e = u8x8::new(9, 10, 11, 12, 13, 14, 15, 16); + let r: u8x8 = transmute(vget_high_u8(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_u64() { - let v: u64 = 64; - let e = u64x1::new(64); - let r: u64x1 = transmute(vdup_n_u64(v)); + unsafe fn test_vget_high_u16() { + let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let e = u16x4::new(5, 6, 7, 8); + let r: u16x4 = transmute(vget_high_u16(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_p8() { - let v: p8 = 64; - let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u8x8 = transmute(vdup_n_p8(v)); + unsafe fn test_vget_high_u32() { + let a = u32x4::new(1, 2, 3, 4); + let e = u32x2::new(3, 4); + let r: u32x2 = transmute(vget_high_u32(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_p16() { - let v: p16 = 64; - let e = u16x4::new(64, 64, 64, 64); - let r: u16x4 = transmute(vdup_n_p16(v)); + unsafe fn test_vget_high_u64() { + let a = u64x2::new(1, 2); + let e = u64x1::new(2); + let r: u64x1 = transmute(vget_high_u64(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vdup_n_f32() { - let v: f32 = 64.0; - let e = f32x2::new(64.0, 64.0); - let r: f32x2 = transmute(vdup_n_f32(v)); + unsafe fn test_vget_high_p8() { + let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let e = u8x8::new(9, 10, 11, 12, 13, 14, 15, 16); + let r: u8x8 = transmute(vget_high_p8(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vldrq_p128() { - let v: [p128; 2] = [1, 2]; - let e: p128 = 2; - let r: p128 = vldrq_p128(v[1..].as_ptr()); + unsafe fn test_vget_high_p16() { + let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let e = u16x4::new(5, 6, 7, 8); + let r: u16x4 = transmute(vget_high_p16(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vstrq_p128() { - let v: [p128; 2] = [1, 2]; - let e: p128 = 2; - let mut r: p128 = 1; - vstrq_p128(&mut r, v[1]); + unsafe fn test_vget_high_f32() { + let a = f32x4::new(1.0, 2.0, 3.0, 4.0); + let e = f32x2::new(3.0, 4.0); + let r: f32x2 = transmute(vget_high_f32(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_s8() { - let v: i8 = 64; - let e = i8x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: i8x8 = transmute(vmov_n_s8(v)); + unsafe fn test_vget_low_s8() { + let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let e = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let r: i8x8 = transmute(vget_low_s8(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_s16() { - let v: i16 = 64; - let e = i16x4::new(64, 64, 64, 64); - let r: i16x4 = transmute(vmov_n_s16(v)); + unsafe fn test_vget_low_s16() { + let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let e = i16x4::new(1, 2, 3, 4); + let r: i16x4 = transmute(vget_low_s16(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_s32() { - let v: i32 = 64; - let e = i32x2::new(64, 64); - let r: i32x2 = transmute(vmov_n_s32(v)); + unsafe fn test_vget_low_s32() { + let a = i32x4::new(1, 2, 3, 4); + let e = i32x2::new(1, 2); + let r: i32x2 = transmute(vget_low_s32(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_s64() { - let v: i64 = 64; - let e = i64x1::new(64); - let r: i64x1 = transmute(vmov_n_s64(v)); + unsafe fn test_vget_low_s64() { + let a = i64x2::new(1, 2); + let e = i64x1::new(1); + let r: i64x1 = transmute(vget_low_s64(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_u8() { - let v: u8 = 64; - let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u8x8 = transmute(vmov_n_u8(v)); + unsafe fn test_vget_low_u8() { + let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let e = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let r: u8x8 = transmute(vget_low_u8(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_u16() { - let v: u16 = 64; - let e = u16x4::new(64, 64, 64, 64); - let r: u16x4 = transmute(vmov_n_u16(v)); + unsafe fn test_vget_low_u16() { + let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let e = u16x4::new(1, 2, 3, 4); + let r: u16x4 = transmute(vget_low_u16(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_u32() { - let v: u32 = 64; - let e = u32x2::new(64, 64); - let r: u32x2 = transmute(vmov_n_u32(v)); + unsafe fn test_vget_low_u32() { + let a = u32x4::new(1, 2, 3, 4); + let e = u32x2::new(1, 2); + let r: u32x2 = transmute(vget_low_u32(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_u64() { - let v: u64 = 64; - let e = u64x1::new(64); - let r: u64x1 = transmute(vmov_n_u64(v)); + unsafe fn test_vget_low_u64() { + let a = u64x2::new(1, 2); + let e = u64x1::new(1); + let r: u64x1 = transmute(vget_low_u64(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_p8() { - let v: p8 = 64; - let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u8x8 = transmute(vmov_n_p8(v)); + unsafe fn test_vget_low_p8() { + let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let e = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let r: u8x8 = transmute(vget_low_p8(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_p16() { - let v: p16 = 64; - let e = u16x4::new(64, 64, 64, 64); - let r: u16x4 = transmute(vmov_n_p16(v)); + unsafe fn test_vget_low_p16() { + let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8); + let e = u16x4::new(1, 2, 3, 4); + let r: u16x4 = transmute(vget_low_p16(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmov_n_f32() { - let v: f32 = 64.0; - let e = f32x2::new(64.0, 64.0); - let r: f32x2 = transmute(vmov_n_f32(v)); + unsafe fn test_vget_low_f32() { + let a = f32x4::new(1.0, 2.0, 3.0, 4.0); + let e = f32x2::new(1.0, 2.0); + let r: f32x2 = transmute(vget_low_f32(transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_s8() { - let v: i8 = 64; + unsafe fn test_vdupq_n_s8() { + let v: i8 = 42; let e = i8x16::new( - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, ); - let r: i8x16 = transmute(vmovq_n_s8(v)); + let r: i8x16 = transmute(vdupq_n_s8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_s16() { + unsafe fn test_vdupq_n_s16() { let v: i16 = 64; let e = i16x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: i16x8 = transmute(vmovq_n_s16(v)); + let r: i16x8 = transmute(vdupq_n_s16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_s32() { + unsafe fn test_vdupq_n_s32() { let v: i32 = 64; let e = i32x4::new(64, 64, 64, 64); - let r: i32x4 = transmute(vmovq_n_s32(v)); + let r: i32x4 = transmute(vdupq_n_s32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_s64() { + unsafe fn test_vdupq_n_s64() { let v: i64 = 64; let e = i64x2::new(64, 64); - let r: i64x2 = transmute(vmovq_n_s64(v)); + let r: i64x2 = transmute(vdupq_n_s64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_u8() { + unsafe fn test_vdupq_n_u8() { let v: u8 = 64; let e = u8x16::new( 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, ); - let r: u8x16 = transmute(vmovq_n_u8(v)); + let r: u8x16 = transmute(vdupq_n_u8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_u16() { + unsafe fn test_vdupq_n_u16() { let v: u16 = 64; let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u16x8 = transmute(vmovq_n_u16(v)); + let r: u16x8 = transmute(vdupq_n_u16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_u32() { + unsafe fn test_vdupq_n_u32() { let v: u32 = 64; let e = u32x4::new(64, 64, 64, 64); - let r: u32x4 = transmute(vmovq_n_u32(v)); + let r: u32x4 = transmute(vdupq_n_u32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_u64() { + unsafe fn test_vdupq_n_u64() { let v: u64 = 64; let e = u64x2::new(64, 64); - let r: u64x2 = transmute(vmovq_n_u64(v)); + let r: u64x2 = transmute(vdupq_n_u64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_p8() { + unsafe fn test_vdupq_n_p8() { let v: p8 = 64; let e = u8x16::new( 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, ); - let r: u8x16 = transmute(vmovq_n_p8(v)); + let r: u8x16 = transmute(vdupq_n_p8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_p16() { + unsafe fn test_vdupq_n_p16() { let v: p16 = 64; let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64); - let r: u16x8 = transmute(vmovq_n_p16(v)); + let r: u16x8 = transmute(vdupq_n_p16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vmovq_n_f32() { + unsafe fn test_vdupq_n_f32() { let v: f32 = 64.0; let e = f32x4::new(64.0, 64.0, 64.0, 64.0); - let r: f32x4 = transmute(vmovq_n_f32(v)); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vgetq_lane_u64() { - let v = i64x2::new(1, 2); - let r = vgetq_lane_u64::<1>(transmute(v)); - assert_eq!(r, 2); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vadd_s8() { - test_ari_s8( - |i, j| vadd_s8(i, j), - |a: i8, b: i8| -> i8 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_s8() { - testq_ari_s8( - |i, j| vaddq_s8(i, j), - |a: i8, b: i8| -> i8 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vadd_s16() { - test_ari_s16( - |i, j| vadd_s16(i, j), - |a: i16, b: i16| -> i16 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_s16() { - testq_ari_s16( - |i, j| vaddq_s16(i, j), - |a: i16, b: i16| -> i16 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vadd_s32() { - test_ari_s32( - |i, j| vadd_s32(i, j), - |a: i32, b: i32| -> i32 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_s32() { - testq_ari_s32( - |i, j| vaddq_s32(i, j), - |a: i32, b: i32| -> i32 { a.overflowing_add(b).0 }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vadd_u8() { - test_ari_u8( - |i, j| vadd_u8(i, j), - |a: u8, b: u8| -> u8 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_u8() { - testq_ari_u8( - |i, j| vaddq_u8(i, j), - |a: u8, b: u8| -> u8 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vadd_u16() { - test_ari_u16( - |i, j| vadd_u16(i, j), - |a: u16, b: u16| -> u16 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_u16() { - testq_ari_u16( - |i, j| vaddq_u16(i, j), - |a: u16, b: u16| -> u16 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vadd_u32() { - test_ari_u32( - |i, j| vadd_u32(i, j), - |a: u32, b: u32| -> u32 { a.overflowing_add(b).0 }, - ); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_u32() { - testq_ari_u32( - |i, j| vaddq_u32(i, j), - |a: u32, b: u32| -> u32 { a.overflowing_add(b).0 }, - ); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vadd_f32() { - test_ari_f32(|i, j| vadd_f32(i, j), |a: f32, b: f32| -> f32 { a + b }); - } - #[simd_test(enable = "neon")] - unsafe fn test_vaddq_f32() { - testq_ari_f32(|i, j| vaddq_f32(i, j), |a: f32, b: f32| -> f32 { a + b }); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddl_s8() { - let v = i8::MAX; - let a = i8x8::new(v, v, v, v, v, v, v, v); - let v = 2 * (v as i16); - let e = i16x8::new(v, v, v, v, v, v, v, v); - let r: i16x8 = transmute(vaddl_s8(transmute(a), transmute(a))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddl_s16() { - let v = i16::MAX; - let a = i16x4::new(v, v, v, v); - let v = 2 * (v as i32); - let e = i32x4::new(v, v, v, v); - let r: i32x4 = transmute(vaddl_s16(transmute(a), transmute(a))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddl_s32() { - let v = i32::MAX; - let a = i32x2::new(v, v); - let v = 2 * (v as i64); - let e = i64x2::new(v, v); - let r: i64x2 = transmute(vaddl_s32(transmute(a), transmute(a))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vaddl_u8() { - let v = u8::MAX; - let a = u8x8::new(v, v, v, v, v, v, v, v); - let v = 2 * (v as u16); - let e = u16x8::new(v, v, v, v, v, v, v, v); - let r: u16x8 = transmute(vaddl_u8(transmute(a), transmute(a))); + let r: f32x4 = transmute(vdupq_n_f32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_u16() { - let v = u16::MAX; - let a = u16x4::new(v, v, v, v); - let v = 2 * (v as u32); - let e = u32x4::new(v, v, v, v); - let r: u32x4 = transmute(vaddl_u16(transmute(a), transmute(a))); + unsafe fn test_vdup_n_s8() { + let v: i8 = 64; + let e = i8x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: i8x8 = transmute(vdup_n_s8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_u32() { - let v = u32::MAX; - let a = u32x2::new(v, v); - let v = 2 * (v as u64); - let e = u64x2::new(v, v); - let r: u64x2 = transmute(vaddl_u32(transmute(a), transmute(a))); + unsafe fn test_vdup_n_s16() { + let v: i16 = 64; + let e = i16x4::new(64, 64, 64, 64); + let r: i16x4 = transmute(vdup_n_s16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_high_s8() { - let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let x = i8::MAX; - let b = i8x16::new(x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x); - let x = x as i16; - let e = i16x8::new(x + 8, x + 9, x + 10, x + 11, x + 12, x + 13, x + 14, x + 15); - let r: i16x8 = transmute(vaddl_high_s8(transmute(a), transmute(b))); + unsafe fn test_vdup_n_s32() { + let v: i32 = 64; + let e = i32x2::new(64, 64); + let r: i32x2 = transmute(vdup_n_s32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_high_s16() { - let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let x = i16::MAX; - let b = i16x8::new(x, x, x, x, x, x, x, x); - let x = x as i32; - let e = i32x4::new(x + 4, x + 5, x + 6, x + 7); - let r: i32x4 = transmute(vaddl_high_s16(transmute(a), transmute(b))); + unsafe fn test_vdup_n_s64() { + let v: i64 = 64; + let e = i64x1::new(64); + let r: i64x1 = transmute(vdup_n_s64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_high_s32() { - let a = i32x4::new(0, 1, 2, 3); - let x = i32::MAX; - let b = i32x4::new(x, x, x, x); - let x = x as i64; - let e = i64x2::new(x + 2, x + 3); - let r: i64x2 = transmute(vaddl_high_s32(transmute(a), transmute(b))); + unsafe fn test_vdup_n_u8() { + let v: u8 = 64; + let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: u8x8 = transmute(vdup_n_u8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_high_u8() { - let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let x = u8::MAX; - let b = u8x16::new(x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x); - let x = x as u16; - let e = u16x8::new(x + 8, x + 9, x + 10, x + 11, x + 12, x + 13, x + 14, x + 15); - let r: u16x8 = transmute(vaddl_high_u8(transmute(a), transmute(b))); + unsafe fn test_vdup_n_u16() { + let v: u16 = 64; + let e = u16x4::new(64, 64, 64, 64); + let r: u16x4 = transmute(vdup_n_u16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_high_u16() { - let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); - let x = u16::MAX; - let b = u16x8::new(x, x, x, x, x, x, x, x); - let x = x as u32; - let e = u32x4::new(x + 4, x + 5, x + 6, x + 7); - let r: u32x4 = transmute(vaddl_high_u16(transmute(a), transmute(b))); + unsafe fn test_vdup_n_u32() { + let v: u32 = 64; + let e = u32x2::new(64, 64); + let r: u32x2 = transmute(vdup_n_u32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddl_high_u32() { - let a = u32x4::new(0, 1, 2, 3); - let x = u32::MAX; - let b = u32x4::new(x, x, x, x); - let x = x as u64; - let e = u64x2::new(x + 2, x + 3); - let r: u64x2 = transmute(vaddl_high_u32(transmute(a), transmute(b))); + unsafe fn test_vdup_n_u64() { + let v: u64 = 64; + let e = u64x1::new(64); + let r: u64x1 = transmute(vdup_n_u64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_s8() { - let x = i16::MAX; - let a = i16x8::new(x, 1, 2, 3, 4, 5, 6, 7); - let y = i8::MAX; - let b = i8x8::new(y, y, y, y, y, y, y, y); - let y = y as i16; - let e = i16x8::new( - x.wrapping_add(y), - 1 + y, - 2 + y, - 3 + y, - 4 + y, - 5 + y, - 6 + y, - 7 + y, - ); - let r: i16x8 = transmute(vaddw_s8(transmute(a), transmute(b))); + unsafe fn test_vdup_n_p8() { + let v: p8 = 64; + let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: u8x8 = transmute(vdup_n_p8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_s16() { - let x = i32::MAX; - let a = i32x4::new(x, 1, 2, 3); - let y = i16::MAX; - let b = i16x4::new(y, y, y, y); - let y = y as i32; - let e = i32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); - let r: i32x4 = transmute(vaddw_s16(transmute(a), transmute(b))); + unsafe fn test_vdup_n_p16() { + let v: p16 = 64; + let e = u16x4::new(64, 64, 64, 64); + let r: u16x4 = transmute(vdup_n_p16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_s32() { - let x = i64::MAX; - let a = i64x2::new(x, 1); - let y = i32::MAX; - let b = i32x2::new(y, y); - let y = y as i64; - let e = i64x2::new(x.wrapping_add(y), 1 + y); - let r: i64x2 = transmute(vaddw_s32(transmute(a), transmute(b))); + unsafe fn test_vdup_n_f32() { + let v: f32 = 64.0; + let e = f32x2::new(64.0, 64.0); + let r: f32x2 = transmute(vdup_n_f32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_u8() { - let x = u16::MAX; - let a = u16x8::new(x, 1, 2, 3, 4, 5, 6, 7); - let y = u8::MAX; - let b = u8x8::new(y, y, y, y, y, y, y, y); - let y = y as u16; - let e = u16x8::new( - x.wrapping_add(y), - 1 + y, - 2 + y, - 3 + y, - 4 + y, - 5 + y, - 6 + y, - 7 + y, - ); - let r: u16x8 = transmute(vaddw_u8(transmute(a), transmute(b))); + unsafe fn test_vldrq_p128() { + let v: [p128; 2] = [1, 2]; + let e: p128 = 2; + let r: p128 = vldrq_p128(v[1..].as_ptr()); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_u16() { - let x = u32::MAX; - let a = u32x4::new(x, 1, 2, 3); - let y = u16::MAX; - let b = u16x4::new(y, y, y, y); - let y = y as u32; - let e = u32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); - let r: u32x4 = transmute(vaddw_u16(transmute(a), transmute(b))); + unsafe fn test_vstrq_p128() { + let v: [p128; 2] = [1, 2]; + let e: p128 = 2; + let mut r: p128 = 1; + vstrq_p128(&mut r, v[1]); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_u32() { - let x = u64::MAX; - let a = u64x2::new(x, 1); - let y = u32::MAX; - let b = u32x2::new(y, y); - let y = y as u64; - let e = u64x2::new(x.wrapping_add(y), 1 + y); - let r: u64x2 = transmute(vaddw_u32(transmute(a), transmute(b))); + unsafe fn test_vmov_n_s8() { + let v: i8 = 64; + let e = i8x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: i8x8 = transmute(vmov_n_s8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_high_s8() { - let x = i16::MAX; - let a = i16x8::new(x, 1, 2, 3, 4, 5, 6, 7); - let y = i8::MAX; - let b = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, y, y, y, y, y, y, y, y); - let y = y as i16; - let e = i16x8::new( - x.wrapping_add(y), - 1 + y, - 2 + y, - 3 + y, - 4 + y, - 5 + y, - 6 + y, - 7 + y, - ); - let r: i16x8 = transmute(vaddw_high_s8(transmute(a), transmute(b))); + unsafe fn test_vmov_n_s16() { + let v: i16 = 64; + let e = i16x4::new(64, 64, 64, 64); + let r: i16x4 = transmute(vmov_n_s16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_high_s16() { - let x = i32::MAX; - let a = i32x4::new(x, 1, 2, 3); - let y = i16::MAX; - let b = i16x8::new(0, 0, 0, 0, y, y, y, y); - let y = y as i32; - let e = i32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); - let r: i32x4 = transmute(vaddw_high_s16(transmute(a), transmute(b))); + unsafe fn test_vmov_n_s32() { + let v: i32 = 64; + let e = i32x2::new(64, 64); + let r: i32x2 = transmute(vmov_n_s32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_high_s32() { - let x = i64::MAX; - let a = i64x2::new(x, 1); - let y = i32::MAX; - let b = i32x4::new(0, 0, y, y); - let y = y as i64; - let e = i64x2::new(x.wrapping_add(y), 1 + y); - let r: i64x2 = transmute(vaddw_high_s32(transmute(a), transmute(b))); + unsafe fn test_vmov_n_s64() { + let v: i64 = 64; + let e = i64x1::new(64); + let r: i64x1 = transmute(vmov_n_s64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_high_u8() { - let x = u16::MAX; - let a = u16x8::new(x, 1, 2, 3, 4, 5, 6, 7); - let y = u8::MAX; - let b = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, y, y, y, y, y, y, y, y); - let y = y as u16; - let e = u16x8::new( - x.wrapping_add(y), - 1 + y, - 2 + y, - 3 + y, - 4 + y, - 5 + y, - 6 + y, - 7 + y, - ); - let r: u16x8 = transmute(vaddw_high_u8(transmute(a), transmute(b))); + unsafe fn test_vmov_n_u8() { + let v: u8 = 64; + let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: u8x8 = transmute(vmov_n_u8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_high_u16() { - let x = u32::MAX; - let a = u32x4::new(x, 1, 2, 3); - let y = u16::MAX; - let b = u16x8::new(0, 0, 0, 0, y, y, y, y); - let y = y as u32; - let e = u32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); - let r: u32x4 = transmute(vaddw_high_u16(transmute(a), transmute(b))); + unsafe fn test_vmov_n_u16() { + let v: u16 = 64; + let e = u16x4::new(64, 64, 64, 64); + let r: u16x4 = transmute(vmov_n_u16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddw_high_u32() { - let x = u64::MAX; - let a = u64x2::new(x, 1); - let y = u32::MAX; - let b = u32x4::new(0, 0, y, y); - let y = y as u64; - let e = u64x2::new(x.wrapping_add(y), 1 + y); - let r: u64x2 = transmute(vaddw_high_u32(transmute(a), transmute(b))); + unsafe fn test_vmov_n_u32() { + let v: u32 = 64; + let e = u32x2::new(64, 64); + let r: u32x2 = transmute(vmov_n_u32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_s16() { - let a = i16x8::new( - (0 << 8) + 1, - (1 << 8) + 1, - (2 << 8) + 1, - (3 << 8) + 1, - (4 << 8) + 1, - (5 << 8) + 1, - (6 << 8) + 1, - (7 << 8) + 1, - ); - let e = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14); - let r: i8x8 = transmute(vaddhn_s16(transmute(a), transmute(a))); + unsafe fn test_vmov_n_u64() { + let v: u64 = 64; + let e = u64x1::new(64); + let r: u64x1 = transmute(vmov_n_u64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_s32() { - let a = i32x4::new((0 << 16) + 1, (1 << 16) + 1, (2 << 16) + 1, (3 << 16) + 1); - let e = i16x4::new(0, 2, 4, 6); - let r: i16x4 = transmute(vaddhn_s32(transmute(a), transmute(a))); + unsafe fn test_vmov_n_p8() { + let v: p8 = 64; + let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: u8x8 = transmute(vmov_n_p8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_s64() { - let a = i64x2::new((0 << 32) + 1, (1 << 32) + 1); - let e = i32x2::new(0, 2); - let r: i32x2 = transmute(vaddhn_s64(transmute(a), transmute(a))); + unsafe fn test_vmov_n_p16() { + let v: p16 = 64; + let e = u16x4::new(64, 64, 64, 64); + let r: u16x4 = transmute(vmov_n_p16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_u16() { - let a = u16x8::new( - (0 << 8) + 1, - (1 << 8) + 1, - (2 << 8) + 1, - (3 << 8) + 1, - (4 << 8) + 1, - (5 << 8) + 1, - (6 << 8) + 1, - (7 << 8) + 1, - ); - let e = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14); - let r: u8x8 = transmute(vaddhn_u16(transmute(a), transmute(a))); + unsafe fn test_vmov_n_f32() { + let v: f32 = 64.0; + let e = f32x2::new(64.0, 64.0); + let r: f32x2 = transmute(vmov_n_f32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_u32() { - let a = u32x4::new((0 << 16) + 1, (1 << 16) + 1, (2 << 16) + 1, (3 << 16) + 1); - let e = u16x4::new(0, 2, 4, 6); - let r: u16x4 = transmute(vaddhn_u32(transmute(a), transmute(a))); + unsafe fn test_vmovq_n_s8() { + let v: i8 = 64; + let e = i8x16::new( + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + ); + let r: i8x16 = transmute(vmovq_n_s8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_u64() { - let a = u64x2::new((0 << 32) + 1, (1 << 32) + 1); - let e = u32x2::new(0, 2); - let r: u32x2 = transmute(vaddhn_u64(transmute(a), transmute(a))); + unsafe fn test_vmovq_n_s16() { + let v: i16 = 64; + let e = i16x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: i16x8 = transmute(vmovq_n_s16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_high_s16() { - let r = i8x8::splat(42); - let a = i16x8::new( - (0 << 8) + 1, - (1 << 8) + 1, - (2 << 8) + 1, - (3 << 8) + 1, - (4 << 8) + 1, - (5 << 8) + 1, - (6 << 8) + 1, - (7 << 8) + 1, - ); - let e = i8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 0, 2, 4, 6, 8, 10, 12, 14); - let r: i8x16 = transmute(vaddhn_high_s16(transmute(r), transmute(a), transmute(a))); + unsafe fn test_vmovq_n_s32() { + let v: i32 = 64; + let e = i32x4::new(64, 64, 64, 64); + let r: i32x4 = transmute(vmovq_n_s32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_high_s32() { - let r = i16x4::splat(42); - let a = i32x4::new((0 << 16) + 1, (1 << 16) + 1, (2 << 16) + 1, (3 << 16) + 1); - let e = i16x8::new(42, 42, 42, 42, 0, 2, 4, 6); - let r: i16x8 = transmute(vaddhn_high_s32(transmute(r), transmute(a), transmute(a))); + unsafe fn test_vmovq_n_s64() { + let v: i64 = 64; + let e = i64x2::new(64, 64); + let r: i64x2 = transmute(vmovq_n_s64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_high_s64() { - let r = i32x2::splat(42); - let a = i64x2::new((0 << 32) + 1, (1 << 32) + 1); - let e = i32x4::new(42, 42, 0, 2); - let r: i32x4 = transmute(vaddhn_high_s64(transmute(r), transmute(a), transmute(a))); + unsafe fn test_vmovq_n_u8() { + let v: u8 = 64; + let e = u8x16::new( + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + ); + let r: u8x16 = transmute(vmovq_n_u8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_high_u16() { - let r = u8x8::splat(42); - let a = u16x8::new( - (0 << 8) + 1, - (1 << 8) + 1, - (2 << 8) + 1, - (3 << 8) + 1, - (4 << 8) + 1, - (5 << 8) + 1, - (6 << 8) + 1, - (7 << 8) + 1, - ); - let e = u8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 0, 2, 4, 6, 8, 10, 12, 14); - let r: u8x16 = transmute(vaddhn_high_u16(transmute(r), transmute(a), transmute(a))); + unsafe fn test_vmovq_n_u16() { + let v: u16 = 64; + let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: u16x8 = transmute(vmovq_n_u16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_high_u32() { - let r = u16x4::splat(42); - let a = u32x4::new((0 << 16) + 1, (1 << 16) + 1, (2 << 16) + 1, (3 << 16) + 1); - let e = u16x8::new(42, 42, 42, 42, 0, 2, 4, 6); - let r: u16x8 = transmute(vaddhn_high_u32(transmute(r), transmute(a), transmute(a))); + unsafe fn test_vmovq_n_u32() { + let v: u32 = 64; + let e = u32x4::new(64, 64, 64, 64); + let r: u32x4 = transmute(vmovq_n_u32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vaddhn_high_u64() { - let r = u32x2::splat(42); - let a = u64x2::new((0 << 32) + 1, (1 << 32) + 1); - let e = u32x4::new(42, 42, 0, 2); - let r: u32x4 = transmute(vaddhn_high_u64(transmute(r), transmute(a), transmute(a))); + unsafe fn test_vmovq_n_u64() { + let v: u64 = 64; + let e = u64x2::new(64, 64); + let r: u64x2 = transmute(vmovq_n_u64(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_s16() { - let round_constant: i16 = (1 << 8) - 1; - let a = i16x8::new( - 0 << 8, - 1 << 8, - 2 << 8, - 3 << 8, - 4 << 8, - 5 << 8, - 6 << 8, - 7 << 8, - ); - let b = i16x8::new( - 0 << 8, - (1 << 8) + round_constant, - 2 << 8, - (3 << 8) + round_constant, - 4 << 8, - (5 << 8) + round_constant, - 6 << 8, - (7 << 8) + round_constant, + unsafe fn test_vmovq_n_p8() { + let v: p8 = 64; + let e = u8x16::new( + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, ); - let e = i8x8::new(0, 3, 4, 7, 8, 11, 12, 15); - let r: i8x8 = transmute(vraddhn_s16(transmute(a), transmute(b))); + let r: u8x16 = transmute(vmovq_n_p8(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_s32() { - let round_constant: i32 = (1 << 16) - 1; - let a = i32x4::new(0 << 16, 1 << 16, 2 << 16, 3 << 16); - let b = i32x4::new( - 0 << 16, - (1 << 16) + round_constant, - 2 << 16, - (3 << 16) + round_constant, - ); - let e = i16x4::new(0, 3, 4, 7); - let r: i16x4 = transmute(vraddhn_s32(transmute(a), transmute(b))); + unsafe fn test_vmovq_n_p16() { + let v: p16 = 64; + let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64); + let r: u16x8 = transmute(vmovq_n_p16(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_s64() { - let round_constant: i64 = (1 << 32) - 1; - let a = i64x2::new(0 << 32, 1 << 32); - let b = i64x2::new(0 << 32, (1 << 32) + round_constant); - let e = i32x2::new(0, 3); - let r: i32x2 = transmute(vraddhn_s64(transmute(a), transmute(b))); + unsafe fn test_vmovq_n_f32() { + let v: f32 = 64.0; + let e = f32x4::new(64.0, 64.0, 64.0, 64.0); + let r: f32x4 = transmute(vmovq_n_f32(v)); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_u16() { - let round_constant: u16 = (1 << 8) - 1; - let a = u16x8::new( - 0 << 8, - 1 << 8, - 2 << 8, - 3 << 8, - 4 << 8, - 5 << 8, - 6 << 8, - 7 << 8, + unsafe fn test_vgetq_lane_u64() { + let v = i64x2::new(1, 2); + let r = vgetq_lane_u64::<1>(transmute(v)); + assert_eq!(r, 2); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vadd_s8() { + test_ari_s8( + |i, j| vadd_s8(i, j), + |a: i8, b: i8| -> i8 { a.overflowing_add(b).0 }, ); - let b = u16x8::new( - 0 << 8, - (1 << 8) + round_constant, - 2 << 8, - (3 << 8) + round_constant, - 4 << 8, - (5 << 8) + round_constant, - 6 << 8, - (7 << 8) + round_constant, + } + #[simd_test(enable = "neon")] + unsafe fn test_vaddq_s8() { + testq_ari_s8( + |i, j| vaddq_s8(i, j), + |a: i8, b: i8| -> i8 { a.overflowing_add(b).0 }, ); - let e = u8x8::new(0, 3, 4, 7, 8, 11, 12, 15); - let r: u8x8 = transmute(vraddhn_u16(transmute(a), transmute(b))); - assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_u32() { - let round_constant: u32 = (1 << 16) - 1; - let a = u32x4::new(0 << 16, 1 << 16, 2 << 16, 3 << 16); - let b = u32x4::new( - 0 << 16, - (1 << 16) + round_constant, - 2 << 16, - (3 << 16) + round_constant, + unsafe fn test_vadd_s16() { + test_ari_s16( + |i, j| vadd_s16(i, j), + |a: i16, b: i16| -> i16 { a.overflowing_add(b).0 }, ); - let e = u16x4::new(0, 3, 4, 7); - let r: u16x4 = transmute(vraddhn_u32(transmute(a), transmute(b))); - assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_u64() { - let round_constant: u64 = (1 << 32) - 1; - let a = u64x2::new(0 << 32, 1 << 32); - let b = u64x2::new(0 << 32, (1 << 32) + round_constant); - let e = u32x2::new(0, 3); - let r: u32x2 = transmute(vraddhn_u64(transmute(a), transmute(b))); - assert_eq!(r, e); + unsafe fn test_vaddq_s16() { + testq_ari_s16( + |i, j| vaddq_s16(i, j), + |a: i16, b: i16| -> i16 { a.overflowing_add(b).0 }, + ); } - #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_high_s16() { - let r = i8x8::splat(42); - let round_constant: i16 = (1 << 8) - 1; - let a = i16x8::new( - 0 << 8, - 1 << 8, - 2 << 8, - 3 << 8, - 4 << 8, - 5 << 8, - 6 << 8, - 7 << 8, + unsafe fn test_vadd_s32() { + test_ari_s32( + |i, j| vadd_s32(i, j), + |a: i32, b: i32| -> i32 { a.overflowing_add(b).0 }, ); - let b = i16x8::new( - 0 << 8, - (1 << 8) + round_constant, - 2 << 8, - (3 << 8) + round_constant, - 4 << 8, - (5 << 8) + round_constant, - 6 << 8, - (7 << 8) + round_constant, + } + #[simd_test(enable = "neon")] + unsafe fn test_vaddq_s32() { + testq_ari_s32( + |i, j| vaddq_s32(i, j), + |a: i32, b: i32| -> i32 { a.overflowing_add(b).0 }, ); - let e = i8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 0, 3, 4, 7, 8, 11, 12, 15); - let r: i8x16 = transmute(vraddhn_high_s16(transmute(r), transmute(a), transmute(b))); - assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_high_s32() { - let r = i16x4::splat(42); - let round_constant: i32 = (1 << 16) - 1; - let a = i32x4::new(0 << 16, 1 << 16, 2 << 16, 3 << 16); - let b = i32x4::new( - 0 << 16, - (1 << 16) + round_constant, - 2 << 16, - (3 << 16) + round_constant, + unsafe fn test_vadd_u8() { + test_ari_u8( + |i, j| vadd_u8(i, j), + |a: u8, b: u8| -> u8 { a.overflowing_add(b).0 }, ); - let e = i16x8::new(42, 42, 42, 42, 0, 3, 4, 7); - let r: i16x8 = transmute(vraddhn_high_s32(transmute(r), transmute(a), transmute(b))); - assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_high_s64() { - let r = i32x2::splat(42); - let round_constant: i64 = (1 << 32) - 1; - let a = i64x2::new(0 << 32, 1 << 32); - let b = i64x2::new(0 << 32, (1 << 32) + round_constant); - let e = i32x4::new(42, 42, 0, 3); - let r: i32x4 = transmute(vraddhn_high_s64(transmute(r), transmute(a), transmute(b))); - assert_eq!(r, e); + unsafe fn test_vaddq_u8() { + testq_ari_u8( + |i, j| vaddq_u8(i, j), + |a: u8, b: u8| -> u8 { a.overflowing_add(b).0 }, + ); } - #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_high_u16() { - let r = u8x8::splat(42); - let round_constant: u16 = (1 << 8) - 1; - let a = u16x8::new( - 0 << 8, - 1 << 8, - 2 << 8, - 3 << 8, - 4 << 8, - 5 << 8, - 6 << 8, - 7 << 8, + unsafe fn test_vadd_u16() { + test_ari_u16( + |i, j| vadd_u16(i, j), + |a: u16, b: u16| -> u16 { a.overflowing_add(b).0 }, ); - let b = u16x8::new( - 0 << 8, - (1 << 8) + round_constant, - 2 << 8, - (3 << 8) + round_constant, - 4 << 8, - (5 << 8) + round_constant, - 6 << 8, - (7 << 8) + round_constant, + } + #[simd_test(enable = "neon")] + unsafe fn test_vaddq_u16() { + testq_ari_u16( + |i, j| vaddq_u16(i, j), + |a: u16, b: u16| -> u16 { a.overflowing_add(b).0 }, ); - let e = u8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 0, 3, 4, 7, 8, 11, 12, 15); - let r: u8x16 = transmute(vraddhn_high_u16(transmute(r), transmute(a), transmute(b))); - assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_high_u32() { - let r = u16x4::splat(42); - let round_constant: u32 = (1 << 16) - 1; - let a = u32x4::new(0 << 16, 1 << 16, 2 << 16, 3 << 16); - let b = u32x4::new( - 0 << 16, - (1 << 16) + round_constant, - 2 << 16, - (3 << 16) + round_constant, + unsafe fn test_vadd_u32() { + test_ari_u32( + |i, j| vadd_u32(i, j), + |a: u32, b: u32| -> u32 { a.overflowing_add(b).0 }, + ); + } + #[simd_test(enable = "neon")] + unsafe fn test_vaddq_u32() { + testq_ari_u32( + |i, j| vaddq_u32(i, j), + |a: u32, b: u32| -> u32 { a.overflowing_add(b).0 }, ); - let e = u16x8::new(42, 42, 42, 42, 0, 3, 4, 7); - let r: u16x8 = transmute(vraddhn_high_s32(transmute(r), transmute(a), transmute(b))); - assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vraddhn_high_u64() { - let r = u32x2::splat(42); - let round_constant: u64 = (1 << 32) - 1; - let a = u64x2::new(0 << 32, 1 << 32); - let b = u64x2::new(0 << 32, (1 << 32) + round_constant); - let e = u32x4::new(42, 42, 0, 3); - let r: u32x4 = transmute(vraddhn_high_s64(transmute(r), transmute(a), transmute(b))); - assert_eq!(r, e); + unsafe fn test_vadd_f32() { + test_ari_f32(|i, j| vadd_f32(i, j), |a: f32, b: f32| -> f32 { a + b }); + } + #[simd_test(enable = "neon")] + unsafe fn test_vaddq_f32() { + testq_ari_f32(|i, j| vaddq_f32(i, j), |a: f32, b: f32| -> f32 { a + b }); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddl_s8() { - let a = i8x8::new(-4, -3, -2, -1, 0, 1, 2, 3); - let r: i16x4 = transmute(vpaddl_s8(transmute(a))); - let e = i16x4::new(-7, -3, 1, 5); + unsafe fn test_vaddl_s8() { + let v = i8::MAX; + let a = i8x8::new(v, v, v, v, v, v, v, v); + let v = 2 * (v as i16); + let e = i16x8::new(v, v, v, v, v, v, v, v); + let r: i16x8 = transmute(vaddl_s8(transmute(a), transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddl_s16() { - let a = i16x4::new(-2, -1, 0, 1); - let r: i32x2 = transmute(vpaddl_s16(transmute(a))); - let e = i32x2::new(-3, 1); + unsafe fn test_vaddl_s16() { + let v = i16::MAX; + let a = i16x4::new(v, v, v, v); + let v = 2 * (v as i32); + let e = i32x4::new(v, v, v, v); + let r: i32x4 = transmute(vaddl_s16(transmute(a), transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddl_s32() { - let a = i32x2::new(-1, 0); - let r: i64x1 = transmute(vpaddl_s32(transmute(a))); - let e = i64x1::new(-1); + unsafe fn test_vaddl_s32() { + let v = i32::MAX; + let a = i32x2::new(v, v); + let v = 2 * (v as i64); + let e = i64x2::new(v, v); + let r: i64x2 = transmute(vaddl_s32(transmute(a), transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddlq_s8() { - let a = i8x16::new(-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7); - let r: i16x8 = transmute(vpaddlq_s8(transmute(a))); - let e = i16x8::new(-15, -11, -7, -3, 1, 5, 9, 13); + unsafe fn test_vaddl_u8() { + let v = u8::MAX; + let a = u8x8::new(v, v, v, v, v, v, v, v); + let v = 2 * (v as u16); + let e = u16x8::new(v, v, v, v, v, v, v, v); + let r: u16x8 = transmute(vaddl_u8(transmute(a), transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddlq_s16() { - let a = i16x8::new(-4, -3, -2, -1, 0, 1, 2, 3); - let r: i32x4 = transmute(vpaddlq_s16(transmute(a))); - let e = i32x4::new(-7, -3, 1, 5); + unsafe fn test_vaddl_u16() { + let v = u16::MAX; + let a = u16x4::new(v, v, v, v); + let v = 2 * (v as u32); + let e = u32x4::new(v, v, v, v); + let r: u32x4 = transmute(vaddl_u16(transmute(a), transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddlq_s32() { - let a = i32x4::new(-2, -1, 0, 1); - let r: i64x2 = transmute(vpaddlq_s32(transmute(a))); - let e = i64x2::new(-3, 1); + unsafe fn test_vaddl_u32() { + let v = u32::MAX; + let a = u32x2::new(v, v); + let v = 2 * (v as u64); + let e = u64x2::new(v, v); + let r: u64x2 = transmute(vaddl_u32(transmute(a), transmute(a))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddl_u8() { - let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, u8::MAX); - let r: u16x4 = transmute(vpaddl_u8(transmute(a))); - let e = u16x4::new(1, 5, 9, u8::MAX as u16 + 6); + unsafe fn test_vaddl_high_s8() { + let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + let x = i8::MAX; + let b = i8x16::new(x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x); + let x = x as i16; + let e = i16x8::new(x + 8, x + 9, x + 10, x + 11, x + 12, x + 13, x + 14, x + 15); + let r: i16x8 = transmute(vaddl_high_s8(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddl_u16() { - let a = u16x4::new(0, 1, 2, u16::MAX); - let r: u32x2 = transmute(vpaddl_u16(transmute(a))); - let e = u32x2::new(1, u16::MAX as u32 + 2); + unsafe fn test_vaddl_high_s16() { + let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); + let x = i16::MAX; + let b = i16x8::new(x, x, x, x, x, x, x, x); + let x = x as i32; + let e = i32x4::new(x + 4, x + 5, x + 6, x + 7); + let r: i32x4 = transmute(vaddl_high_s16(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddl_u32() { - let a = u32x2::new(1, u32::MAX); - let r: u64x1 = transmute(vpaddl_u32(transmute(a))); - let e = u64x1::new(u32::MAX as u64 + 1); + unsafe fn test_vaddl_high_s32() { + let a = i32x4::new(0, 1, 2, 3); + let x = i32::MAX; + let b = i32x4::new(x, x, x, x); + let x = x as i64; + let e = i64x2::new(x + 2, x + 3); + let r: i64x2 = transmute(vaddl_high_s32(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddlq_u8() { - let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, u8::MAX); - let r: u16x8 = transmute(vpaddlq_u8(transmute(a))); - let e = u16x8::new(1, 5, 9, 13, 17, 21, 25, u8::MAX as u16 + 14); + unsafe fn test_vaddl_high_u8() { + let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + let x = u8::MAX; + let b = u8x16::new(x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x); + let x = x as u16; + let e = u16x8::new(x + 8, x + 9, x + 10, x + 11, x + 12, x + 13, x + 14, x + 15); + let r: u16x8 = transmute(vaddl_high_u8(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddlq_u16() { - let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, u16::MAX); - let r: u32x4 = transmute(vpaddlq_u16(transmute(a))); - let e = u32x4::new(1, 5, 9, u16::MAX as u32 + 6); + unsafe fn test_vaddl_high_u16() { + let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7); + let x = u16::MAX; + let b = u16x8::new(x, x, x, x, x, x, x, x); + let x = x as u32; + let e = u32x4::new(x + 4, x + 5, x + 6, x + 7); + let r: u32x4 = transmute(vaddl_high_u16(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpaddlq_u32() { - let a = u32x4::new(0, 1, 2, u32::MAX); - let r: u64x2 = transmute(vpaddlq_u32(transmute(a))); - let e = u64x2::new(1, u32::MAX as u64 + 2); + unsafe fn test_vaddl_high_u32() { + let a = u32x4::new(0, 1, 2, 3); + let x = u32::MAX; + let b = u32x4::new(x, x, x, x); + let x = x as u64; + let e = u64x2::new(x + 2, x + 3); + let r: u64x2 = transmute(vaddl_high_u32(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadal_s8() { - let a = i16x4::new(42, 42, 42, 42); - let b = i8x8::new(-4, -3, -2, -1, 0, 1, 2, 3); - let r: i16x4 = transmute(vpadal_s8(transmute(a), transmute(b))); - let e = i16x4::new(35, 39, 43, 47); + unsafe fn test_vaddw_s8() { + let x = i16::MAX; + let a = i16x8::new(x, 1, 2, 3, 4, 5, 6, 7); + let y = i8::MAX; + let b = i8x8::new(y, y, y, y, y, y, y, y); + let y = y as i16; + let e = i16x8::new( + x.wrapping_add(y), + 1 + y, + 2 + y, + 3 + y, + 4 + y, + 5 + y, + 6 + y, + 7 + y, + ); + let r: i16x8 = transmute(vaddw_s8(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadal_s16() { - let a = i32x2::new(42, 42); - let b = i16x4::new(-2, -1, 0, 1); - let r: i32x2 = transmute(vpadal_s16(transmute(a), transmute(b))); - let e = i32x2::new(39, 43); + unsafe fn test_vaddw_s16() { + let x = i32::MAX; + let a = i32x4::new(x, 1, 2, 3); + let y = i16::MAX; + let b = i16x4::new(y, y, y, y); + let y = y as i32; + let e = i32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); + let r: i32x4 = transmute(vaddw_s16(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadal_s32() { - let a = i64x1::new(42); - let b = i32x2::new(-1, 0); - let r: i64x1 = transmute(vpadal_s32(transmute(a), transmute(b))); - let e = i64x1::new(41); + unsafe fn test_vaddw_s32() { + let x = i64::MAX; + let a = i64x2::new(x, 1); + let y = i32::MAX; + let b = i32x2::new(y, y); + let y = y as i64; + let e = i64x2::new(x.wrapping_add(y), 1 + y); + let r: i64x2 = transmute(vaddw_s32(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadalq_s8() { - let a = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let b = i8x16::new(-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7); - let r: i16x8 = transmute(vpadalq_s8(transmute(a), transmute(b))); - let e = i16x8::new(27, 31, 35, 39, 43, 47, 51, 55); + unsafe fn test_vaddw_u8() { + let x = u16::MAX; + let a = u16x8::new(x, 1, 2, 3, 4, 5, 6, 7); + let y = u8::MAX; + let b = u8x8::new(y, y, y, y, y, y, y, y); + let y = y as u16; + let e = u16x8::new( + x.wrapping_add(y), + 1 + y, + 2 + y, + 3 + y, + 4 + y, + 5 + y, + 6 + y, + 7 + y, + ); + let r: u16x8 = transmute(vaddw_u8(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadalq_s16() { - let a = i32x4::new(42, 42, 42, 42); - let b = i16x8::new(-4, -3, -2, -1, 0, 1, 2, 3); - let r: i32x4 = transmute(vpadalq_s16(transmute(a), transmute(b))); - let e = i32x4::new(35, 39, 43, 47); + unsafe fn test_vaddw_u16() { + let x = u32::MAX; + let a = u32x4::new(x, 1, 2, 3); + let y = u16::MAX; + let b = u16x4::new(y, y, y, y); + let y = y as u32; + let e = u32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); + let r: u32x4 = transmute(vaddw_u16(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadalq_s32() { - let a = i64x2::new(42, 42); - let b = i32x4::new(-2, -1, 0, 1); - let r: i64x2 = transmute(vpadalq_s32(transmute(a), transmute(b))); - let e = i64x2::new(39, 43); + unsafe fn test_vaddw_u32() { + let x = u64::MAX; + let a = u64x2::new(x, 1); + let y = u32::MAX; + let b = u32x2::new(y, y); + let y = y as u64; + let e = u64x2::new(x.wrapping_add(y), 1 + y); + let r: u64x2 = transmute(vaddw_u32(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadal_u8() { - let a = u16x4::new(42, 42, 42, 42); - let b = u8x8::new(0, 1, 2, 3, 4, 5, 6, u8::MAX); - let r: u16x4 = transmute(vpadal_u8(transmute(a), transmute(b))); - let e = u16x4::new(43, 47, 51, u8::MAX as u16 + 48); + unsafe fn test_vaddw_high_s8() { + let x = i16::MAX; + let a = i16x8::new(x, 1, 2, 3, 4, 5, 6, 7); + let y = i8::MAX; + let b = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, y, y, y, y, y, y, y, y); + let y = y as i16; + let e = i16x8::new( + x.wrapping_add(y), + 1 + y, + 2 + y, + 3 + y, + 4 + y, + 5 + y, + 6 + y, + 7 + y, + ); + let r: i16x8 = transmute(vaddw_high_s8(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadal_u16() { - let a = u32x2::new(42, 42); - let b = u16x4::new(0, 1, 2, u16::MAX); - let r: u32x2 = transmute(vpadal_u16(transmute(a), transmute(b))); - let e = u32x2::new(43, u16::MAX as u32 + 44); + unsafe fn test_vaddw_high_s16() { + let x = i32::MAX; + let a = i32x4::new(x, 1, 2, 3); + let y = i16::MAX; + let b = i16x8::new(0, 0, 0, 0, y, y, y, y); + let y = y as i32; + let e = i32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); + let r: i32x4 = transmute(vaddw_high_s16(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadal_u32() { - let a = u64x1::new(42); - let b = u32x2::new(1, u32::MAX); - let r: u64x1 = transmute(vpadal_u32(transmute(a), transmute(b))); - let e = u64x1::new(u32::MAX as u64 + 43); + unsafe fn test_vaddw_high_s32() { + let x = i64::MAX; + let a = i64x2::new(x, 1); + let y = i32::MAX; + let b = i32x4::new(0, 0, y, y); + let y = y as i64; + let e = i64x2::new(x.wrapping_add(y), 1 + y); + let r: i64x2 = transmute(vaddw_high_s32(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadalq_u8() { - let a = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42); - let b = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, u8::MAX); - let r: u16x8 = transmute(vpadalq_u8(transmute(a), transmute(b))); - let e = u16x8::new(43, 47, 51, 55, 59, 63, 67, u8::MAX as u16 + 56); + unsafe fn test_vaddw_high_u8() { + let x = u16::MAX; + let a = u16x8::new(x, 1, 2, 3, 4, 5, 6, 7); + let y = u8::MAX; + let b = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, y, y, y, y, y, y, y, y); + let y = y as u16; + let e = u16x8::new( + x.wrapping_add(y), + 1 + y, + 2 + y, + 3 + y, + 4 + y, + 5 + y, + 6 + y, + 7 + y, + ); + let r: u16x8 = transmute(vaddw_high_u8(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadalq_u16() { - let a = u32x4::new(42, 42, 42, 42); - let b = u16x8::new(0, 1, 2, 3, 4, 5, 6, u16::MAX); - let r: u32x4 = transmute(vpadalq_u16(transmute(a), transmute(b))); - let e = u32x4::new(43, 47, 51, u16::MAX as u32 + 48); + unsafe fn test_vaddw_high_u16() { + let x = u32::MAX; + let a = u32x4::new(x, 1, 2, 3); + let y = u16::MAX; + let b = u16x8::new(0, 0, 0, 0, y, y, y, y); + let y = y as u32; + let e = u32x4::new(x.wrapping_add(y), 1 + y, 2 + y, 3 + y); + let r: u32x4 = transmute(vaddw_high_u16(transmute(a), transmute(b))); assert_eq!(r, e); } #[simd_test(enable = "neon")] - unsafe fn test_vpadalq_u32() { - let a = u64x2::new(42, 42); - let b = u32x4::new(0, 1, 2, u32::MAX); - let r: u64x2 = transmute(vpadalq_u32(transmute(a), transmute(b))); - let e = u64x2::new(43, u32::MAX as u64 + 44); + unsafe fn test_vaddw_high_u32() { + let x = u64::MAX; + let a = u64x2::new(x, 1); + let y = u32::MAX; + let b = u32x4::new(0, 0, y, y); + let y = y as u64; + let e = u64x2::new(x.wrapping_add(y), 1 + y); + let r: u64x2 = transmute(vaddw_high_u32(transmute(a), transmute(b))); assert_eq!(r, e); } @@ -13490,132 +10827,6 @@ mod tests { assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_s8() { - let a = i8x8::new(1, -2, 3, -4, 5, 6, 7, 8); - let b = i8x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = i8x8::new(-2, -4, 5, 7, 0, 2, 4, 6); - let r: i8x8 = transmute(vpmin_s8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_s16() { - let a = i16x4::new(1, 2, 3, -4); - let b = i16x4::new(0, 3, 2, 5); - let e = i16x4::new(1, -4, 0, 2); - let r: i16x4 = transmute(vpmin_s16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_s32() { - let a = i32x2::new(1, -2); - let b = i32x2::new(0, 3); - let e = i32x2::new(-2, 0); - let r: i32x2 = transmute(vpmin_s32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_u8() { - let a = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let b = u8x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = u8x8::new(1, 3, 5, 7, 0, 2, 4, 6); - let r: u8x8 = transmute(vpmin_u8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_u16() { - let a = u16x4::new(1, 2, 3, 4); - let b = u16x4::new(0, 3, 2, 5); - let e = u16x4::new(1, 3, 0, 2); - let r: u16x4 = transmute(vpmin_u16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_u32() { - let a = u32x2::new(1, 2); - let b = u32x2::new(0, 3); - let e = u32x2::new(1, 0); - let r: u32x2 = transmute(vpmin_u32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmin_f32() { - let a = f32x2::new(1., -2.); - let b = f32x2::new(0., 3.); - let e = f32x2::new(-2., 0.); - let r: f32x2 = transmute(vpmin_f32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmax_s8() { - let a = i8x8::new(1, -2, 3, -4, 5, 6, 7, 8); - let b = i8x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = i8x8::new(1, 3, 6, 8, 3, 5, 7, 9); - let r: i8x8 = transmute(vpmax_s8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmax_s16() { - let a = i16x4::new(1, 2, 3, -4); - let b = i16x4::new(0, 3, 2, 5); - let e = i16x4::new(2, 3, 3, 5); - let r: i16x4 = transmute(vpmax_s16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmax_s32() { - let a = i32x2::new(1, -2); - let b = i32x2::new(0, 3); - let e = i32x2::new(1, 3); - let r: i32x2 = transmute(vpmax_s32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmax_u8() { - let a = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let b = u8x8::new(0, 3, 2, 5, 4, 7, 6, 9); - let e = u8x8::new(2, 4, 6, 8, 3, 5, 7, 9); - let r: u8x8 = transmute(vpmax_u8(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmax_u16() { - let a = u16x4::new(1, 2, 3, 4); - let b = u16x4::new(0, 3, 2, 5); - let e = u16x4::new(2, 4, 3, 5); - let r: u16x4 = transmute(vpmax_u16(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmax_u32() { - let a = u32x2::new(1, 2); - let b = u32x2::new(0, 3); - let e = u32x2::new(2, 3); - let r: u32x2 = transmute(vpmax_u32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon")] - unsafe fn test_vpmax_f32() { - let a = f32x2::new(1., -2.); - let b = f32x2::new(0., 3.); - let e = f32x2::new(1., 3.); - let r: f32x2 = transmute(vpmax_f32(transmute(a), transmute(b))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] unsafe fn test_vand_s8() { test_bit_s8(|i, j| vand_s8(i, j), |a: i8, b: i8| -> i8 { a & b }); @@ -13818,42 +11029,78 @@ mod tests { unsafe fn test_vceq_s8() { test_cmp_s8( |i, j| vceq_s8(i, j), - |a: i8, b: i8| -> u8 { if a == b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a == b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s8() { testq_cmp_s8( |i, j| vceqq_s8(i, j), - |a: i8, b: i8| -> u8 { if a == b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a == b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_s16() { test_cmp_s16( |i, j| vceq_s16(i, j), - |a: i16, b: i16| -> u16 { if a == b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a == b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s16() { testq_cmp_s16( |i, j| vceqq_s16(i, j), - |a: i16, b: i16| -> u16 { if a == b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a == b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_s32() { test_cmp_s32( |i, j| vceq_s32(i, j), - |a: i32, b: i32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a == b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s32() { testq_cmp_s32( |i, j| vceqq_s32(i, j), - |a: i32, b: i32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a == b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -13861,42 +11108,78 @@ mod tests { unsafe fn test_vceq_u8() { test_cmp_u8( |i, j| vceq_u8(i, j), - |a: u8, b: u8| -> u8 { if a == b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a == b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u8() { testq_cmp_u8( |i, j| vceqq_u8(i, j), - |a: u8, b: u8| -> u8 { if a == b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a == b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_u16() { test_cmp_u16( |i, j| vceq_u16(i, j), - |a: u16, b: u16| -> u16 { if a == b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a == b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u16() { testq_cmp_u16( |i, j| vceqq_u16(i, j), - |a: u16, b: u16| -> u16 { if a == b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a == b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_u32() { test_cmp_u32( |i, j| vceq_u32(i, j), - |a: u32, b: u32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a == b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u32() { testq_cmp_u32( |i, j| vceqq_u32(i, j), - |a: u32, b: u32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a == b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -13904,14 +11187,26 @@ mod tests { unsafe fn test_vceq_f32() { test_cmp_f32( |i, j| vcge_f32(i, j), - |a: f32, b: f32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a == b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_f32() { testq_cmp_f32( |i, j| vcgeq_f32(i, j), - |a: f32, b: f32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a == b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -13919,42 +11214,78 @@ mod tests { unsafe fn test_vcgt_s8() { test_cmp_s8( |i, j| vcgt_s8(i, j), - |a: i8, b: i8| -> u8 { if a > b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a > b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s8() { testq_cmp_s8( |i, j| vcgtq_s8(i, j), - |a: i8, b: i8| -> u8 { if a > b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a > b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_s16() { test_cmp_s16( |i, j| vcgt_s16(i, j), - |a: i16, b: i16| -> u16 { if a > b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a > b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s16() { testq_cmp_s16( |i, j| vcgtq_s16(i, j), - |a: i16, b: i16| -> u16 { if a > b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a > b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_s32() { test_cmp_s32( |i, j| vcgt_s32(i, j), - |a: i32, b: i32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a > b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s32() { testq_cmp_s32( |i, j| vcgtq_s32(i, j), - |a: i32, b: i32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a > b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -13962,42 +11293,78 @@ mod tests { unsafe fn test_vcgt_u8() { test_cmp_u8( |i, j| vcgt_u8(i, j), - |a: u8, b: u8| -> u8 { if a > b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a > b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u8() { testq_cmp_u8( |i, j| vcgtq_u8(i, j), - |a: u8, b: u8| -> u8 { if a > b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a > b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_u16() { test_cmp_u16( |i, j| vcgt_u16(i, j), - |a: u16, b: u16| -> u16 { if a > b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a > b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u16() { testq_cmp_u16( |i, j| vcgtq_u16(i, j), - |a: u16, b: u16| -> u16 { if a > b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a > b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_u32() { test_cmp_u32( |i, j| vcgt_u32(i, j), - |a: u32, b: u32| -> u32 { if a > b { 0xFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a > b { + 0xFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u32() { testq_cmp_u32( |i, j| vcgtq_u32(i, j), - |a: u32, b: u32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a > b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14005,14 +11372,26 @@ mod tests { unsafe fn test_vcgt_f32() { test_cmp_f32( |i, j| vcgt_f32(i, j), - |a: f32, b: f32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a > b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_f32() { testq_cmp_f32( |i, j| vcgtq_f32(i, j), - |a: f32, b: f32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a > b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14020,42 +11399,78 @@ mod tests { unsafe fn test_vclt_s8() { test_cmp_s8( |i, j| vclt_s8(i, j), - |a: i8, b: i8| -> u8 { if a < b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a < b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s8() { testq_cmp_s8( |i, j| vcltq_s8(i, j), - |a: i8, b: i8| -> u8 { if a < b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a < b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_s16() { test_cmp_s16( |i, j| vclt_s16(i, j), - |a: i16, b: i16| -> u16 { if a < b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a < b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s16() { testq_cmp_s16( |i, j| vcltq_s16(i, j), - |a: i16, b: i16| -> u16 { if a < b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a < b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_s32() { test_cmp_s32( |i, j| vclt_s32(i, j), - |a: i32, b: i32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a < b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s32() { testq_cmp_s32( |i, j| vcltq_s32(i, j), - |a: i32, b: i32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a < b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14063,42 +11478,78 @@ mod tests { unsafe fn test_vclt_u8() { test_cmp_u8( |i, j| vclt_u8(i, j), - |a: u8, b: u8| -> u8 { if a < b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a < b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u8() { testq_cmp_u8( |i, j| vcltq_u8(i, j), - |a: u8, b: u8| -> u8 { if a < b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a < b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_u16() { test_cmp_u16( |i, j| vclt_u16(i, j), - |a: u16, b: u16| -> u16 { if a < b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a < b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u16() { testq_cmp_u16( |i, j| vcltq_u16(i, j), - |a: u16, b: u16| -> u16 { if a < b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a < b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_u32() { test_cmp_u32( |i, j| vclt_u32(i, j), - |a: u32, b: u32| -> u32 { if a < b { 0xFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a < b { + 0xFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u32() { testq_cmp_u32( |i, j| vcltq_u32(i, j), - |a: u32, b: u32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a < b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14106,14 +11557,26 @@ mod tests { unsafe fn test_vclt_f32() { test_cmp_f32( |i, j| vclt_f32(i, j), - |a: f32, b: f32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a < b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_f32() { testq_cmp_f32( |i, j| vcltq_f32(i, j), - |a: f32, b: f32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a < b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14121,42 +11584,78 @@ mod tests { unsafe fn test_vcle_s8() { test_cmp_s8( |i, j| vcle_s8(i, j), - |a: i8, b: i8| -> u8 { if a <= b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a <= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s8() { testq_cmp_s8( |i, j| vcleq_s8(i, j), - |a: i8, b: i8| -> u8 { if a <= b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a <= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_s16() { test_cmp_s16( |i, j| vcle_s16(i, j), - |a: i16, b: i16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a <= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s16() { testq_cmp_s16( |i, j| vcleq_s16(i, j), - |a: i16, b: i16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a <= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_s32() { test_cmp_s32( |i, j| vcle_s32(i, j), - |a: i32, b: i32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a <= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s32() { testq_cmp_s32( |i, j| vcleq_s32(i, j), - |a: i32, b: i32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a <= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14164,42 +11663,78 @@ mod tests { unsafe fn test_vcle_u8() { test_cmp_u8( |i, j| vcle_u8(i, j), - |a: u8, b: u8| -> u8 { if a <= b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a <= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u8() { testq_cmp_u8( |i, j| vcleq_u8(i, j), - |a: u8, b: u8| -> u8 { if a <= b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a <= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_u16() { test_cmp_u16( |i, j| vcle_u16(i, j), - |a: u16, b: u16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a <= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u16() { testq_cmp_u16( |i, j| vcleq_u16(i, j), - |a: u16, b: u16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a <= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_u32() { test_cmp_u32( |i, j| vcle_u32(i, j), - |a: u32, b: u32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a <= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u32() { testq_cmp_u32( |i, j| vcleq_u32(i, j), - |a: u32, b: u32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a <= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14207,14 +11742,26 @@ mod tests { unsafe fn test_vcle_f32() { test_cmp_f32( |i, j| vcle_f32(i, j), - |a: f32, b: f32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a <= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_f32() { testq_cmp_f32( |i, j| vcleq_f32(i, j), - |a: f32, b: f32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a <= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14222,42 +11769,78 @@ mod tests { unsafe fn test_vcge_s8() { test_cmp_s8( |i, j| vcge_s8(i, j), - |a: i8, b: i8| -> u8 { if a >= b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a >= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s8() { testq_cmp_s8( |i, j| vcgeq_s8(i, j), - |a: i8, b: i8| -> u8 { if a >= b { 0xFF } else { 0 } }, + |a: i8, b: i8| -> u8 { + if a >= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_s16() { test_cmp_s16( |i, j| vcge_s16(i, j), - |a: i16, b: i16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a >= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s16() { testq_cmp_s16( |i, j| vcgeq_s16(i, j), - |a: i16, b: i16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, + |a: i16, b: i16| -> u16 { + if a >= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_s32() { test_cmp_s32( |i, j| vcge_s32(i, j), - |a: i32, b: i32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a >= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s32() { testq_cmp_s32( |i, j| vcgeq_s32(i, j), - |a: i32, b: i32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, + |a: i32, b: i32| -> u32 { + if a >= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14265,42 +11848,78 @@ mod tests { unsafe fn test_vcge_u8() { test_cmp_u8( |i, j| vcge_u8(i, j), - |a: u8, b: u8| -> u8 { if a >= b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a >= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u8() { testq_cmp_u8( |i, j| vcgeq_u8(i, j), - |a: u8, b: u8| -> u8 { if a >= b { 0xFF } else { 0 } }, + |a: u8, b: u8| -> u8 { + if a >= b { + 0xFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_u16() { test_cmp_u16( |i, j| vcge_u16(i, j), - |a: u16, b: u16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a >= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u16() { testq_cmp_u16( |i, j| vcgeq_u16(i, j), - |a: u16, b: u16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, + |a: u16, b: u16| -> u16 { + if a >= b { + 0xFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_u32() { test_cmp_u32( |i, j| vcge_u32(i, j), - |a: u32, b: u32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a >= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u32() { testq_cmp_u32( |i, j| vcgeq_u32(i, j), - |a: u32, b: u32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, + |a: u32, b: u32| -> u32 { + if a >= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14308,14 +11927,26 @@ mod tests { unsafe fn test_vcge_f32() { test_cmp_f32( |i, j| vcge_f32(i, j), - |a: f32, b: f32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a >= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_f32() { testq_cmp_f32( |i, j| vcgeq_f32(i, j), - |a: f32, b: f32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, + |a: f32, b: f32| -> u32 { + if a >= b { + 0xFFFFFFFF + } else { + 0 + } + }, ); } @@ -14831,49 +12462,6 @@ mod tests { ); } - #[simd_test(enable = "neon")] - unsafe fn test_vabs_s8() { - let a = i8x8::new(-1, 0, 1, -2, 0, 2, -128, 127); - let r: i8x8 = transmute(vabs_s8(transmute(a))); - let e = i8x8::new(1, 0, 1, 2, 0, 2, -128, 127); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vabsq_s8() { - let a = i8x16::new(-1, 0, 1, -2, 0, 2, -128, 127, -1, 0, 1, -2, 0, 2, -128, 127); - let r: i8x16 = transmute(vabsq_s8(transmute(a))); - let e = i8x16::new(1, 0, 1, 2, 0, 2, -128, 127, 1, 0, 1, 2, 0, 2, -128, 127); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vabs_s16() { - let a = i16x4::new(-1, 0, i16::MIN, i16::MAX); - let r: i16x4 = transmute(vabs_s16(transmute(a))); - let e = i16x4::new(1, 0, i16::MIN, i16::MAX); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vabsq_s16() { - let a = i16x8::new(-1, 0, i16::MIN, i16::MAX, -1, 0, i16::MIN, i16::MAX); - let r: i16x8 = transmute(vabsq_s16(transmute(a))); - let e = i16x8::new(1, 0, i16::MIN, i16::MAX, 1, 0, i16::MIN, i16::MAX); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vabs_s32() { - let a = i32x2::new(i32::MIN, i32::MIN + 1); - let r: i32x2 = transmute(vabs_s32(transmute(a))); - let e = i32x2::new(i32::MIN, i32::MAX); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vabsq_s32() { - let a = i32x4::new(i32::MIN, i32::MIN + 1, 0, -1); - let r: i32x4 = transmute(vabsq_s32(transmute(a))); - let e = i32x4::new(i32::MIN, i32::MAX, 0, 1); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] unsafe fn test_vaba_s8() { let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8); @@ -14987,117 +12575,6 @@ mod tests { assert_eq!(r, e); } - #[simd_test(enable = "neon")] - unsafe fn test_vpadd_s16() { - let a = i16x4::new(1, 2, 3, 4); - let b = i16x4::new(0, -1, -2, -3); - let r: i16x4 = transmute(vpadd_s16(transmute(a), transmute(b))); - let e = i16x4::new(3, 7, -1, -5); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpadd_s32() { - let a = i32x2::new(1, 2); - let b = i32x2::new(0, -1); - let r: i32x2 = transmute(vpadd_s32(transmute(a), transmute(b))); - let e = i32x2::new(3, -1); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpadd_s8() { - let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let b = i8x8::new(0, -1, -2, -3, -4, -5, -6, -7); - let r: i8x8 = transmute(vpadd_s8(transmute(a), transmute(b))); - let e = i8x8::new(3, 7, 11, 15, -1, -5, -9, -13); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpadd_u16() { - let a = u16x4::new(1, 2, 3, 4); - let b = u16x4::new(30, 31, 32, 33); - let r: u16x4 = transmute(vpadd_u16(transmute(a), transmute(b))); - let e = u16x4::new(3, 7, 61, 65); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpadd_u32() { - let a = u32x2::new(1, 2); - let b = u32x2::new(30, 31); - let r: u32x2 = transmute(vpadd_u32(transmute(a), transmute(b))); - let e = u32x2::new(3, 61); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vpadd_u8() { - let a = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8); - let b = u8x8::new(30, 31, 32, 33, 34, 35, 36, 37); - let r: u8x8 = transmute(vpadd_u8(transmute(a), transmute(b))); - let e = u8x8::new(3, 7, 11, 15, 61, 65, 69, 73); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcnt_s8() { - let a: i8x8 = transmute(u8x8::new( - 0b11001000, 0b11111111, 0b00000000, 0b11011111, 0b10000001, 0b10101001, 0b00001000, - 0b00111111, - )); - let e = i8x8::new(3, 8, 0, 7, 2, 4, 1, 6); - let r: i8x8 = transmute(vcnt_s8(transmute(a))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcntq_s8() { - let a: i8x16 = transmute(u8x16::new( - 0b11001000, 0b11111111, 0b00000000, 0b11011111, 0b10000001, 0b10101001, 0b00001000, - 0b00111111, 0b11101110, 0b00000000, 0b11111111, 0b00100001, 0b11111111, 0b10010111, - 0b11100000, 0b00010000, - )); - let e = i8x16::new(3, 8, 0, 7, 2, 4, 1, 6, 6, 0, 8, 2, 8, 5, 3, 1); - let r: i8x16 = transmute(vcntq_s8(transmute(a))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcnt_u8() { - let a = u8x8::new( - 0b11001000, 0b11111111, 0b00000000, 0b11011111, 0b10000001, 0b10101001, 0b00001000, - 0b00111111, - ); - let e = u8x8::new(3, 8, 0, 7, 2, 4, 1, 6); - let r: u8x8 = transmute(vcnt_u8(transmute(a))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcntq_u8() { - let a = u8x16::new( - 0b11001000, 0b11111111, 0b00000000, 0b11011111, 0b10000001, 0b10101001, 0b00001000, - 0b00111111, 0b11101110, 0b00000000, 0b11111111, 0b00100001, 0b11111111, 0b10010111, - 0b11100000, 0b00010000, - ); - let e = u8x16::new(3, 8, 0, 7, 2, 4, 1, 6, 6, 0, 8, 2, 8, 5, 3, 1); - let r: u8x16 = transmute(vcntq_u8(transmute(a))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcnt_p8() { - let a = u8x8::new( - 0b11001000, 0b11111111, 0b00000000, 0b11011111, 0b10000001, 0b10101001, 0b00001000, - 0b00111111, - ); - let e = u8x8::new(3, 8, 0, 7, 2, 4, 1, 6); - let r: u8x8 = transmute(vcnt_p8(transmute(a))); - assert_eq!(r, e); - } - #[simd_test(enable = "neon")] - unsafe fn test_vcntq_p8() { - let a = u8x16::new( - 0b11001000, 0b11111111, 0b00000000, 0b11011111, 0b10000001, 0b10101001, 0b00001000, - 0b00111111, 0b11101110, 0b00000000, 0b11111111, 0b00100001, 0b11111111, 0b10010111, - 0b11100000, 0b00010000, - ); - let e = u8x16::new(3, 8, 0, 7, 2, 4, 1, 6, 6, 0, 8, 2, 8, 5, 3, 1); - let r: u8x16 = transmute(vcntq_p8(transmute(a))); - assert_eq!(r, e); - } #[simd_test(enable = "neon")] unsafe fn test_vrev16_s8() { let a = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7); @@ -15350,35 +12827,6 @@ mod tests { let e: u16x8 = transmute(vrev64q_p16(transmute(a))); assert_eq!(r, e); } - #[simd_test(enable = "neon,i8mm")] - unsafe fn test_vmmlaq_s32() { - let a = i32x4::new(1, 3, 4, -0x10000); - let b = i8x16::new(1, 21, 31, 14, 5, 6, -128, 8, 9, 13, 15, 12, 13, -1, 20, 16); - let c = i8x16::new(12, 22, 3, 4, -1, 56, 7, 8, 91, 10, -128, 15, 13, 14, 17, 16); - let e = i32x4::new(123, -5353, 690, -65576); - let r: i32x4 = transmute(vmmlaq_s32(transmute(a), transmute(b), transmute(c))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon,i8mm")] - unsafe fn test_vmmlaq_u32() { - let a = u32x4::new(1, 3, 4, 0xffff0000); - let b = u8x16::new(1, 21, 31, 14, 5, 6, 128, 8, 9, 13, 15, 12, 13, 255, 20, 16); - let c = u8x16::new(12, 22, 3, 4, 255, 56, 7, 8, 91, 10, 128, 15, 13, 14, 17, 16); - let e = u32x4::new(3195, 6935, 18354, 4294909144); - let r: u32x4 = transmute(vmmlaq_u32(transmute(a), transmute(b), transmute(c))); - assert_eq!(r, e); - } - - #[simd_test(enable = "neon,i8mm")] - unsafe fn test_vusmmlaq_s32() { - let a = i32x4::new(1, 3, 4, -0x10000); - let b = u8x16::new(1, 21, 31, 14, 5, 6, 128, 8, 9, 13, 15, 12, 13, 255, 20, 16); - let c = i8x16::new(12, 22, 3, 4, -1, 56, 7, 8, 91, 10, -128, 15, 13, 14, 17, 16); - let e = i32x4::new(1915, -1001, 15026, -61992); - let r: i32x4 = transmute(vusmmlaq_s32(transmute(a), transmute(b), transmute(c))); - assert_eq!(r, e); - } macro_rules! test_vcombine { ($test_id:ident => $fn_id:ident ([$($a:expr),*], [$($b:expr),*])) => { From 18799954def3aee6519826f99ef0e2a8ce3d8668 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Fri, 31 Jan 2025 11:16:48 +0000 Subject: [PATCH 04/13] Update test runner to support big endian --- crates/intrinsic-test/Cargo.toml | 4 +- crates/intrinsic-test/src/argument.rs | 8 +- crates/intrinsic-test/src/intrinsic.rs | 6 +- crates/intrinsic-test/src/main.rs | 194 ++++++++++++++++++------- crates/intrinsic-test/src/types.rs | 6 +- 5 files changed, 150 insertions(+), 68 deletions(-) diff --git a/crates/intrinsic-test/Cargo.toml b/crates/intrinsic-test/Cargo.toml index a358bea4b8..06051abc8d 100644 --- a/crates/intrinsic-test/Cargo.toml +++ b/crates/intrinsic-test/Cargo.toml @@ -4,7 +4,9 @@ version = "0.1.0" authors = ["Jamie Cunliffe ", "James McGregor "] + "Jacob Bramley ", + "James Barford-Evans " + ] license = "MIT OR Apache-2.0" edition = "2024" diff --git a/crates/intrinsic-test/src/argument.rs b/crates/intrinsic-test/src/argument.rs index ebabe31927..155e150d40 100644 --- a/crates/intrinsic-test/src/argument.rs +++ b/crates/intrinsic-test/src/argument.rs @@ -209,13 +209,13 @@ impl ArgumentList { /// Creates a line for each argument that initializes the argument from an array `[arg]_vals` at /// an offset `i` using a load intrinsic, in C. /// e.g `uint8x8_t a = vld1_u8(&a_vals[i]);` - pub fn load_values_c(&self, indentation: Indentation, p64_armv7_workaround: bool) -> String { + pub fn load_values_c(&self, indentation: Indentation, target: &str) -> String { self.iter() .filter_map(|arg| { // The ACLE doesn't support 64-bit polynomial loads on Armv7 // This and the cast are a workaround for this let armv7_p64 = if let TypeKind::Poly = arg.ty.kind() { - p64_armv7_workaround + target.contains("v7") } else { false }; @@ -226,7 +226,7 @@ impl ArgumentList { ty = arg.to_c_type(), name = arg.name, load = if arg.is_simd() { - arg.ty.get_load_function(p64_armv7_workaround) + arg.ty.get_load_function(target) } else { "*".to_string() }, @@ -258,7 +258,7 @@ impl ArgumentList { name = arg.name, vals_name = arg.rust_vals_array_name(), load = if arg.is_simd() { - arg.ty.get_load_function(false) + arg.ty.get_load_function("__") } else { "*".to_string() }, diff --git a/crates/intrinsic-test/src/intrinsic.rs b/crates/intrinsic-test/src/intrinsic.rs index b83c371ea4..b5c1071777 100644 --- a/crates/intrinsic-test/src/intrinsic.rs +++ b/crates/intrinsic-test/src/intrinsic.rs @@ -91,7 +91,7 @@ impl Intrinsic { indentation: Indentation, additional: &str, passes: u32, - p64_armv7_workaround: bool, + target: &str, ) -> String { let body_indentation = indentation.nested(); format!( @@ -100,9 +100,7 @@ impl Intrinsic { {body_indentation}auto __return_value = {intrinsic_call}({args});\n\ {print_result}\n\ {indentation}}}", - loaded_args = self - .arguments - .load_values_c(body_indentation, p64_armv7_workaround), + loaded_args = self.arguments.load_values_c(body_indentation, target), intrinsic_call = self.name, args = self.arguments.as_call_param_c(), print_result = self.print_result_c(body_indentation, additional) diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs index 58966d230c..e24d87a86e 100644 --- a/crates/intrinsic-test/src/main.rs +++ b/crates/intrinsic-test/src/main.rs @@ -37,7 +37,7 @@ fn gen_code_c( intrinsic: &Intrinsic, constraints: &[&Argument], name: String, - p64_armv7_workaround: bool, + target: &str, ) -> String { if let Some((current, constraints)) = constraints.split_last() { let range = current @@ -62,13 +62,13 @@ fn gen_code_c( intrinsic, constraints, format!("{name}-{i}"), - p64_armv7_workaround + target, ) ) }) .join("\n") } else { - intrinsic.generate_loop_c(indentation, &name, PASSES, p64_armv7_workaround) + intrinsic.generate_loop_c(indentation, &name, PASSES, target) } } @@ -76,7 +76,7 @@ fn generate_c_program( notices: &str, header_files: &[&str], intrinsic: &Intrinsic, - p64_armv7_workaround: bool, + target: &str, ) -> String { let constraints = intrinsic .arguments @@ -131,7 +131,7 @@ int main(int argc, char **argv) {{ intrinsic, constraints.as_slice(), Default::default(), - p64_armv7_workaround + target, ), ) } @@ -174,7 +174,7 @@ fn gen_code_rust( } } -fn generate_rust_program(notices: &str, intrinsic: &Intrinsic, a32: bool) -> String { +fn generate_rust_program(notices: &str, intrinsic: &Intrinsic, target: &str) -> String { let constraints = intrinsic .arguments .iter() @@ -201,7 +201,11 @@ fn main() {{ {passes} }} "#, - target_arch = if a32 { "arm" } else { "aarch64" }, + target_arch = if target.starts_with("aarch64") { + "aarch64" + } else { + "arm" + }, arglists = intrinsic .arguments .gen_arglists_rust(indentation.nested(), PASSES), @@ -214,22 +218,68 @@ fn main() {{ ) } -fn compile_c(c_filename: &str, intrinsic: &Intrinsic, compiler: &str, a32: bool) -> bool { +fn compile_c( + c_filename: &str, + intrinsic: &Intrinsic, + compiler: &str, + target: &str, + cxx_toolchain_dir: Option<&str>, +) -> bool { let flags = std::env::var("CPPFLAGS").unwrap_or("".into()); + let arch_flags = if target.starts_with("aarch64") { + "-march=armv8.6-a+crypto+sha3+crc+dotprod" + } else { + "-march=armv8.6-a+crypto+crc+dotprod" + }; - let output = Command::new("sh") - .arg("-c") - .arg(format!( - // -ffp-contract=off emulates Rust's approach of not fusing separate mul-add operations - "{cpp} {cppflags} {arch_flags} -ffp-contract=off -Wno-narrowing -O2 -target {target} -o c_programs/{intrinsic} {filename}", - target = if a32 { "armv7-unknown-linux-gnueabihf" } else { "aarch64-unknown-linux-gnu" }, - arch_flags = if a32 { "-march=armv8.6-a+crypto+crc+dotprod" } else { "-march=armv8.6-a+crypto+sha3+crc+dotprod" }, - filename = c_filename, - intrinsic = intrinsic.name, - cpp = compiler, - cppflags = flags, - )) - .output(); + let intrinsic_name = &intrinsic.name; + + let compiler_command = if target == "aarch64_be-unknown-linux-gnu" { + let Some(cxx_toolchain_dir) = cxx_toolchain_dir else { + panic!("When setting `--target aarch64_be-unknown-linux-gnu` the C++ compilers toolchain directory must be set with `--cxx-toolchain-dir `"); + }; + + /* clang++ cannot link an aarch64_be object file, so we invoke + * aarch64_be-unknown-linux-gnu's C++ linker. This ensures that we + * are testing the intrinsics against LLVM. + * + * Note: setting `--sysroot=<...>` which is the obvious thing to do + * does not work as it gets caught up with `#include_next ` + * not existing... */ + format!( + "{compiler} {flags} {arch_flags} \ + -ffp-contract=off \ + -Wno-narrowing \ + -O2 \ + --target=aarch64_be-unknown-linux-gnu \ + -I{cxx_toolchain_dir}/include \ + -I{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include \ + -I{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include/c++/14.2.1 \ + -I{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include/c++/14.2.1/aarch64_be-none-linux-gnu \ + -I{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include/c++/14.2.1/backward \ + -I{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/libc/usr/include \ + -c {c_filename} \ + -o c_programs/{intrinsic_name}.o && \ + {cxx_toolchain_dir}/bin/aarch64_be-none-linux-gnu-g++ c_programs/{intrinsic_name}.o -o c_programs/{intrinsic_name} && \ + rm c_programs/{intrinsic_name}.o", + ) + } else { + // -ffp-contract=off emulates Rust's approach of not fusing separate mul-add operations + let base_compiler_command = format!( + "{compiler} {flags} {arch_flags} -o c_programs/{intrinsic_name} {c_filename} -ffp-contract=off -Wno-narrowing -O2" + ); + + /* `-target` can be passed to some c++ compilers, however if we want to + * use a c++ compiler does not support this flag we do not want to pass + * the flag. */ + if compiler.contains("clang") { + format!("{base_compiler_command} -target {target}") + } else { + format!("{base_compiler_command} -flax-vector-conversions") + } + }; + + let output = Command::new("sh").arg("-c").arg(compiler_command).output(); if let Ok(output) = output { if output.status.success() { true @@ -258,7 +308,13 @@ fn build_notices(line_prefix: &str) -> String { ) } -fn build_c(notices: &str, intrinsics: &Vec, compiler: Option<&str>, a32: bool) -> bool { +fn build_c( + notices: &str, + intrinsics: &Vec, + compiler: Option<&str>, + target: &str, + cxx_toolchain_dir: Option<&str>, +) -> bool { let _ = std::fs::create_dir("c_programs"); intrinsics .par_iter() @@ -266,25 +322,31 @@ fn build_c(notices: &str, intrinsics: &Vec, compiler: Option<&str>, a let c_filename = format!(r#"c_programs/{}.cpp"#, i.name); let mut file = File::create(&c_filename).unwrap(); - let c_code = generate_c_program(notices, &["arm_neon.h", "arm_acle.h"], i, a32); + let c_code = generate_c_program(notices, &["arm_neon.h", "arm_acle.h"], i, target); file.write_all(c_code.into_bytes().as_slice()).unwrap(); match compiler { None => true, - Some(compiler) => compile_c(&c_filename, i, compiler, a32), + Some(compiler) => compile_c(&c_filename, i, compiler, target, cxx_toolchain_dir), } }) .find_any(|x| !x) .is_none() } -fn build_rust(notices: &str, intrinsics: &[Intrinsic], toolchain: Option<&str>, a32: bool) -> bool { +fn build_rust( + notices: &str, + intrinsics: &[Intrinsic], + toolchain: Option<&str>, + target: &str, + linker: Option<&str>, +) -> bool { intrinsics.iter().for_each(|i| { let rust_dir = format!(r#"rust_programs/{}"#, i.name); let _ = std::fs::create_dir_all(&rust_dir); let rust_filename = format!(r#"{rust_dir}/main.rs"#); let mut file = File::create(&rust_filename).unwrap(); - let c_code = generate_rust_program(notices, i, a32); + let c_code = generate_rust_program(notices, i, target); file.write_all(c_code.into_bytes().as_slice()).unwrap(); }); @@ -330,26 +392,33 @@ path = "{intrinsic}/main.rs""#, Some(t) => t, }; + /* If there has been a linker explicitly set from the command line then + * we want to set it via setting it in the RUSTFLAGS*/ + let mut rust_flags = "-Cdebuginfo=0".to_string(); + if let Some(linker) = linker { + rust_flags.push_str(" -Clinker="); + rust_flags.push_str(linker); + rust_flags.push_str(" -Clink-args=-static"); + } + + let cargo_command = format!( + "cargo {toolchain} build --target {target} --release", + toolchain = toolchain, + target = target + ); + let output = Command::new("sh") .current_dir("rust_programs") .arg("-c") - .arg(format!( - "cargo {toolchain} build --target {target} --release", - toolchain = toolchain, - target = if a32 { - "armv7-unknown-linux-gnueabihf" - } else { - "aarch64-unknown-linux-gnu" - }, - )) - .env("RUSTFLAGS", "-Cdebuginfo=0") + .arg(cargo_command) + .env("RUSTFLAGS", rust_flags) .output(); if let Ok(output) = output { if output.status.success() { true } else { error!( - "Failed to compile code for intrinsics\n\nstdout:\n{}\n\nstderr:\n{}", + "Failed to compile code for rust intrinsics\n\nstdout:\n{}\n\nstderr:\n{}", std::str::from_utf8(&output.stdout).unwrap_or(""), std::str::from_utf8(&output.stderr).unwrap_or("") ); @@ -387,13 +456,21 @@ struct Cli { #[arg(long)] skip: Option, - /// Run tests for A32 instrinsics instead of A64 - #[arg(long)] - a32: bool, - /// Regenerate test programs, but don't build or run them #[arg(long)] generate_only: bool, + + /// Pass a target the test suite + #[arg(long, default_value_t = String::from("aarch64-unknown-linux-gnu"))] + target: String, + + /// Set the linker + #[arg(long)] + linker: Option, + + /// Set the sysroot for the C++ compiler + #[arg(long)] + cxx_toolchain_dir: Option, } fn main() { @@ -403,6 +480,10 @@ fn main() { let filename = args.input; let c_runner = args.runner.unwrap_or_default(); + let target: &str = args.target.as_str(); + let linker = args.linker.as_deref(); + let cxx_toolchain_dir = args.cxx_toolchain_dir; + let skip = if let Some(filename) = args.skip { let data = std::fs::read_to_string(&filename).expect("Failed to open file"); data.lines() @@ -413,7 +494,7 @@ fn main() { } else { Default::default() }; - let a32 = args.a32; + let a32 = target.contains("v7"); let mut intrinsics = get_neon_intrinsics(&filename).expect("Error parsing input file"); intrinsics.sort_by(|a, b| a.name.cmp(&b.name)); @@ -450,16 +531,22 @@ fn main() { let notices = build_notices("// "); - if !build_c(¬ices, &intrinsics, cpp_compiler.as_deref(), a32) { + if !build_c( + ¬ices, + &intrinsics, + cpp_compiler.as_deref(), + target, + cxx_toolchain_dir.as_deref(), + ) { std::process::exit(2); } - if !build_rust(¬ices, &intrinsics, toolchain.as_deref(), a32) { + if !build_rust(¬ices, &intrinsics, toolchain.as_deref(), target, linker) { std::process::exit(3); } - if let Some(ref toolchain) = toolchain { - if !compare_outputs(&intrinsics, toolchain, &c_runner, a32) { + if let Some(ref _toolchain) = toolchain { + if !compare_outputs(&intrinsics, &c_runner, target) { std::process::exit(1) } } @@ -471,7 +558,7 @@ enum FailureReason { Difference(String, String, String), } -fn compare_outputs(intrinsics: &Vec, toolchain: &str, runner: &str, a32: bool) -> bool { +fn compare_outputs(intrinsics: &Vec, runner: &str, target: &str) -> bool { let intrinsics = intrinsics .par_iter() .filter_map(|intrinsic| { @@ -483,20 +570,15 @@ fn compare_outputs(intrinsics: &Vec, toolchain: &str, runner: &str, a intrinsic = intrinsic.name, )) .output(); + let rust = Command::new("sh") - .current_dir("rust_programs") .arg("-c") .arg(format!( - "cargo {toolchain} run --target {target} --bin {intrinsic} --release", + "{runner} ./rust_programs/target/{target}/release/{intrinsic}", + runner = runner, + target = target, intrinsic = intrinsic.name, - toolchain = toolchain, - target = if a32 { - "armv7-unknown-linux-gnueabihf" - } else { - "aarch64-unknown-linux-gnu" - }, )) - .env("RUSTFLAGS", "-Cdebuginfo=0") .output(); let (c, rust) = match (c, rust) { diff --git a/crates/intrinsic-test/src/types.rs b/crates/intrinsic-test/src/types.rs index 1eb44896f7..90559b5935 100644 --- a/crates/intrinsic-test/src/types.rs +++ b/crates/intrinsic-test/src/types.rs @@ -375,9 +375,9 @@ impl IntrinsicType { } /// Determines the load function for this type. - pub fn get_load_function(&self, armv7_p64_workaround: bool) -> String { + pub fn get_load_function(&self, target: &str) -> String { match self { - IntrinsicType::Ptr { child, .. } => child.get_load_function(armv7_p64_workaround), + IntrinsicType::Ptr { child, .. } => child.get_load_function(target), IntrinsicType::Type { kind: k, bit_len: Some(bl), @@ -397,7 +397,7 @@ impl IntrinsicType { TypeKind::Int => "s", TypeKind::Float => "f", // The ACLE doesn't support 64-bit polynomial loads on Armv7 - TypeKind::Poly => if armv7_p64_workaround && *bl == 64 {"s"} else {"p"}, + TypeKind::Poly => if target.starts_with("armv7") && *bl == 64 {"s"} else {"p"}, x => todo!("get_load_function TypeKind: {:#?}", x), }, size = bl, From db90975d7b15f275076e86bb44293faa2f3c76b7 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Fri, 31 Jan 2025 11:17:21 +0000 Subject: [PATCH 05/13] Update CI to facilitate big endian at some point --- .../aarch64_be-none-linux-gnu/Dockerfile | 13 +++ ci/run.sh | 80 +++++++++++++++---- 2 files changed, 78 insertions(+), 15 deletions(-) create mode 100644 ci/docker/aarch64_be-none-linux-gnu/Dockerfile diff --git a/ci/docker/aarch64_be-none-linux-gnu/Dockerfile b/ci/docker/aarch64_be-none-linux-gnu/Dockerfile new file mode 100644 index 0000000000..91026f6a15 --- /dev/null +++ b/ci/docker/aarch64_be-none-linux-gnu/Dockerfile @@ -0,0 +1,13 @@ +FROM ubuntu:24.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + g++ \ + ca-certificates \ + libc6-dev \ + libc6-dev-arm64-cross \ + qemu-user \ + make \ + file \ + clang-18 \ + lld diff --git a/ci/run.sh b/ci/run.sh index 28d53c5375..11cffd6de8 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -108,6 +108,7 @@ if [ "$NOSTD" != "1" ]; then cargo_test "${STDARCH_EXAMPLES} ${PROFILE}" fi + # Test targets compiled with extra features. case ${TARGET} in x86*) @@ -134,26 +135,75 @@ case ${TARGET} in export RUSTFLAGS="${OLD_RUSTFLAGS} -C target-feature=+vsx" cargo_test "${PROFILE}" ;; + + # Setup aarch64 & armv7 specific variables, the runner, along with some + # tests to skip + aarch64-unknown-linux-gnu*) + TEST_CPPFLAGS="-fuse-ld=lld -I/usr/aarch64-linux-gnu/include/ -I/usr/aarch64-linux-gnu/include/c++/9/aarch64-linux-gnu/" + TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64.txt + TEST_CXX_COMPILER="clang++-15" + TEST_RUNNER="${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" + ;; + armv7-unknown-linux-gnueabihf*) + TEST_CPPFLAGS="-fuse-ld=lld -I/usr/arm-linux-gnueabihf/include/ -I/usr/arm-linux-gnueabihf/include/c++/9/arm-linux-gnueabihf/" + TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_arm.txt + TEST_CXX_COMPILER="clang++-15" + TEST_RUNNER="${CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER}" + ;; *) ;; esac -if [ "${TARGET}" = "aarch64-unknown-linux-gnu" ]; then - ( - CPPFLAGS="-fuse-ld=lld -I/usr/aarch64-linux-gnu/include/ -I/usr/aarch64-linux-gnu/include/c++/9/aarch64-linux-gnu/" \ - RUSTFLAGS="$HOST_RUSTFLAGS" \ - RUST_LOG=warn \ - cargo run ${INTRINSIC_TEST} "${PROFILE}" --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json --runner "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" --cppcompiler "clang++-15" --skip crates/intrinsic-test/missing_aarch64.txt - ) -elif [ "${TARGET}" = "armv7-unknown-linux-gnueabihf" ]; then - ( - CPPFLAGS="-fuse-ld=lld -I/usr/arm-linux-gnueabihf/include/ -I/usr/arm-linux-gnueabihf/include/c++/9/arm-linux-gnueabihf/" \ - RUSTFLAGS="$HOST_RUSTFLAGS" \ - RUST_LOG=warn \ - cargo run ${INTRINSIC_TEST} "${PROFILE}" --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json --runner "${CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER}" --cppcompiler "clang++-15" --skip crates/intrinsic-test/missing_arm.txt --a32 - ) -fi +# Arm specific +case "${TARGET}" in + aarch64-unknown-linux-gnu*|armv7-unknown-linux-gnueabihf*) + CPPFLAGS="${TEST_CPPFLAGS}" RUSTFLAGS="${HOST_RUSTFLAGS}" RUST_LOG=warn \ + cargo run "${INTRINSIC_TEST}" "${PROFILE}" \ + --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json \ + --runner "${TEST_RUNNER}" \ + --cppcompiler "${TEST_CXX_COMPILER}" \ + --skip "${TEST_SKIP_INTRINSICS}" \ + --target "${TARGET}" + ;; + + aarch64_be-unknown-linux-gnu) + # get the aarch64_be toolchain + TOOLCHAIN="arm-gnu-toolchain-14.2.rel1-x86_64-aarch64_be-none-linux-gnu" + + # Download the aarch64_be gcc toolchain + curl -L "https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/${TOOLCHAIN}.tar.xz" \ + -o "${TOOLCHAIN}.tar.xz" && \ + tar -xzvf "./${TOOLCHAIN}".tar.xz && \ + mdkir /toolchains && + mv "./${TOOLCHAIN}" /toolchains + + # Build the test suite + AARCH64_BE_TOOLCHAIN="/toolchains/${TOOLCHAIN}" + AARCH64_BE_LIBC="${AARCH64_BE_TOOLCHAIN}/aarch64_be-none-linux-gnu/libc" + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="${AARCH64_BE_TOOLCHAIN}/bin/aarch64_be-none-linux-gnu-gcc" \ + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64_be -L ${AARCH64_BE_LIBC}" \ + CPPFLAGS="-fuse-ld=lld" \ + RUSTFLAGS="-C linker=${AARCH64_BE}/bin/aarch64_be-none-linux-gnu-gcc -C link-args=-static" \ + cargo build \ + --target="${TARGET}" \ + --manifest-path=crates/intrinsic-test/Cargo.toml \ + --profile=release + + # Now run it + qemu-aarch64_be -L "${AARCH64_BE_LIBC}" \ + "./target/${TARGET}/release/intrinsic-test" \ + "./intrinsics_data/arm_intrinsics.json" \ + --target "${TARGET}" \ + --cppcompiler "clang++-18" \ + --skip crates/intrinsic-test/missing_aarch64.txt \ + --runner "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" \ + --linker "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER}" \ + --cxx-toolchain-dir "${AARCH64_BE_TOOLCHAIN}" + ;; + *) + ;; +esac if [ "$NORUN" != "1" ] && [ "$NOSTD" != 1 ]; then # Test examples From 87a9a1f243fe9a648aef011b77fc76bd9f3630a4 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 4 Feb 2025 15:09:37 +0000 Subject: [PATCH 06/13] ensure correct linker gets chosen --- crates/intrinsic-test/src/main.rs | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs index e24d87a86e..9f6b6bcb28 100644 --- a/crates/intrinsic-test/src/main.rs +++ b/crates/intrinsic-test/src/main.rs @@ -395,11 +395,6 @@ path = "{intrinsic}/main.rs""#, /* If there has been a linker explicitly set from the command line then * we want to set it via setting it in the RUSTFLAGS*/ let mut rust_flags = "-Cdebuginfo=0".to_string(); - if let Some(linker) = linker { - rust_flags.push_str(" -Clinker="); - rust_flags.push_str(linker); - rust_flags.push_str(" -Clink-args=-static"); - } let cargo_command = format!( "cargo {toolchain} build --target {target} --release", @@ -407,12 +402,24 @@ path = "{intrinsic}/main.rs""#, target = target ); - let output = Command::new("sh") + let mut command = Command::new("sh"); + + command .current_dir("rust_programs") .arg("-c") - .arg(cargo_command) - .env("RUSTFLAGS", rust_flags) - .output(); + .arg(cargo_command); + + if let Some(linker) = linker { + rust_flags.push_str(" -C linker="); + rust_flags.push_str(linker); + rust_flags.push_str(" -C link-args=-static"); + + command.env("CPPFLAGS", "-fuse-ld=lld"); + } + + command.env("RUSTFLAGS", rust_flags); + let output = command.output(); + if let Ok(output) = output { if output.status.success() { true From 1fe46bd2d8998c304741c03ebb8584f24cf76d88 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 11 Feb 2025 15:35:26 +0000 Subject: [PATCH 07/13] fix non-working intrinsics --- crates/core_arch/src/arm/neon.rs | 127 ++ .../src/arm_shared/neon/generated.rs | 1662 ++++++++--------- .../arm_shared/neon/shift_and_insert_tests.rs | 8 +- .../spec/neon/arm_shared.spec.yml | 365 ++-- 4 files changed, 1056 insertions(+), 1106 deletions(-) diff --git a/crates/core_arch/src/arm/neon.rs b/crates/core_arch/src/arm/neon.rs index 3badab5a4c..90c358b5db 100644 --- a/crates/core_arch/src/arm/neon.rs +++ b/crates/core_arch/src/arm/neon.rs @@ -1,5 +1,8 @@ use crate::core_arch::arm_shared::neon::*; +#[cfg(test)] +use stdarch_test::assert_instr; + #[allow(improper_ctypes)] unsafe extern "unadjusted" { #[link_name = "llvm.arm.neon.vbsl.v8i8"] @@ -7,3 +10,127 @@ unsafe extern "unadjusted" { #[link_name = "llvm.arm.neon.vbsl.v16i8"] fn vbslq_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; } + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(0 <= N && N <= 63); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(N as i64), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(0 <= N && N <= 63); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(N as i64), + )) +} + +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(0 <= N && N <= 63); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(N as i64), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(-N as i64), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(-N as i64), + )) +} + +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(1 <= N && N <= 64); + let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); + let ret_val: poly64x2_t = transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(-N as i64), + )); + simd_shuffle!(ret_val, ret_val, [0, 1]) +} diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 7592d0e02f..074613851c 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -81,10 +81,14 @@ pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { - __crc32cw( - __crc32cw(crc, (data & 0xFFFFFFFF) as u32), - (data >> 32) as u32, - ) + let a: i32 = crc as i32; + let b: i32 = (data & 0xFFFFFFFF).as_signed() as i32; + let c: i32 = (data >> 32).as_signed() as i32; + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] + fn ___crc32cw(crc: i32, data: i32) -> i32; + } + ___crc32cw(___crc32cw(a, b), c).as_unsigned() as u32 } #[doc = "CRC32-C single round checksum for bytes (16 bits)."] @@ -156,10 +160,14 @@ pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { - __crc32w( - __crc32w(crc, (data & 0xFFFFFFFF) as u32), - (data >> 32) as u32, - ) + let a: i32 = crc as i32; + let b: i32 = (data & 0xFFFFFFFF).as_signed() as i32; + let c: i32 = (data >> 32).as_signed() as i32; + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] + fn ___crc32w(crc: i32, data: i32) -> i32; + } + ___crc32w(___crc32w(a, b), c).as_unsigned() } #[doc = "CRC32 single round checksum for bytes (16 bits)."] @@ -226,7 +234,7 @@ pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -247,7 +255,7 @@ unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -271,7 +279,7 @@ unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -292,7 +300,7 @@ unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -316,7 +324,7 @@ unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -337,7 +345,7 @@ unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -361,7 +369,7 @@ unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -382,7 +390,7 @@ unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -406,7 +414,7 @@ unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -427,7 +435,7 @@ unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -449,7 +457,7 @@ unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -470,7 +478,7 @@ unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(sadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -494,7 +502,7 @@ unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -515,7 +523,7 @@ unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -539,7 +547,7 @@ unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -560,7 +568,7 @@ unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -584,7 +592,7 @@ unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -605,7 +613,7 @@ unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -629,7 +637,7 @@ unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -650,7 +658,7 @@ unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -674,7 +682,7 @@ unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -695,7 +703,7 @@ unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -717,7 +725,7 @@ unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -738,7 +746,7 @@ unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(uadalp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") @@ -20734,7 +20742,7 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { transmute(vld1_v2f32( ptr as *const i8, @@ -20751,7 +20759,7 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { let ret_val: float32x2_t = transmute(vld1_v2f32( ptr as *const i8, @@ -20948,7 +20956,7 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { transmute(vld1_v2i32( ptr as *const i8, @@ -20965,7 +20973,7 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { let ret_val: uint32x2_t = transmute(vld1_v2i32( ptr as *const i8, @@ -21017,7 +21025,7 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { transmute(vld1_v1i64( ptr as *const i8, @@ -21204,22 +21212,6 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { - transmute(vld1_v1i64( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) -} - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] #[doc = "## Safety"] @@ -21280,9 +21272,9 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0")] fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; } _vld1_f32_x2(a) @@ -21313,9 +21305,9 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0")] fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; } let mut ret_val: float32x2x2_t = _vld1_f32_x2(a); @@ -21349,9 +21341,9 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0")] fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; } _vld1_f32_x3(a) @@ -21382,9 +21374,9 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0")] fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; } let mut ret_val: float32x2x3_t = _vld1_f32_x3(a); @@ -21419,9 +21411,9 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0")] fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; } _vld1_f32_x4(a) @@ -21452,9 +21444,9 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0")] fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; } let mut ret_val: float32x2x4_t = _vld1_f32_x4(a); @@ -21490,9 +21482,9 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0")] fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; } _vld1q_f32_x2(a) @@ -21523,9 +21515,9 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0")] fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; } let mut ret_val: float32x4x2_t = _vld1q_f32_x2(a); @@ -21559,9 +21551,9 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0")] fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; } _vld1q_f32_x3(a) @@ -21592,9 +21584,9 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0")] fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; } let mut ret_val: float32x4x3_t = _vld1q_f32_x3(a); @@ -21629,9 +21621,9 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0")] fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; } _vld1q_f32_x4(a) @@ -21662,9 +21654,9 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0")] fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; } let mut ret_val: float32x4x4_t = _vld1q_f32_x4(a); @@ -21675,6 +21667,25 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { ret_val } +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { + let a: *const i8 = ptr as *const i8; + let b: i32 = crate::mem::align_of::() as i32; + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] + fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; + } + transmute(_vld1_v1i64(a, b)) +} + #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] #[doc = "## Safety"] @@ -21915,9 +21926,8 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { @@ -21930,9 +21940,8 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { @@ -21946,9 +21955,8 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { @@ -21961,9 +21969,8 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { @@ -21981,9 +21988,8 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { @@ -21996,9 +22002,8 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { @@ -22012,9 +22017,8 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { @@ -22027,9 +22031,8 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { @@ -22043,11 +22046,10 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32) } @@ -22058,11 +22060,10 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { let ret_val: int32x2_t = vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32); simd_shuffle!(ret_val, ret_val, [0, 1]) @@ -22074,9 +22075,8 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { @@ -22089,9 +22089,8 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { @@ -22104,11 +22103,10 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { vld1_v1i64(ptr as *const i8, crate::mem::align_of::() as i32) } @@ -22119,9 +22117,8 @@ pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { @@ -22134,9 +22131,8 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { @@ -22169,9 +22165,9 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0")] fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; } _vld1_s8_x2(a) @@ -22202,9 +22198,9 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0")] fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; } let mut ret_val: int8x8x2_t = _vld1_s8_x2(a); @@ -22238,9 +22234,9 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0")] fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; } _vld1_s8_x3(a) @@ -22271,9 +22267,9 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0")] fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; } let mut ret_val: int8x8x3_t = _vld1_s8_x3(a); @@ -22308,9 +22304,9 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0")] fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; } _vld1_s8_x4(a) @@ -22341,9 +22337,9 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0")] fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; } let mut ret_val: int8x8x4_t = _vld1_s8_x4(a); @@ -22379,9 +22375,9 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0")] fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; } _vld1q_s8_x2(a) @@ -22412,9 +22408,9 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0")] fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; } let mut ret_val: int8x16x2_t = _vld1q_s8_x2(a); @@ -22456,9 +22452,9 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0")] fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; } _vld1q_s8_x3(a) @@ -22489,9 +22485,9 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0")] fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; } let mut ret_val: int8x16x3_t = _vld1q_s8_x3(a); @@ -22538,9 +22534,9 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0")] fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; } _vld1q_s8_x4(a) @@ -22571,9 +22567,9 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0")] fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; } let mut ret_val: int8x16x4_t = _vld1q_s8_x4(a); @@ -22625,9 +22621,9 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0")] fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; } _vld1_s16_x2(a) @@ -22658,9 +22654,9 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0")] fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; } let mut ret_val: int16x4x2_t = _vld1_s16_x2(a); @@ -22694,9 +22690,9 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0")] fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; } _vld1_s16_x3(a) @@ -22727,9 +22723,9 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0")] fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; } let mut ret_val: int16x4x3_t = _vld1_s16_x3(a); @@ -22764,9 +22760,9 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0")] fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; } _vld1_s16_x4(a) @@ -22797,9 +22793,9 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0")] fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; } let mut ret_val: int16x4x4_t = _vld1_s16_x4(a); @@ -22835,9 +22831,9 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0")] fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; } _vld1q_s16_x2(a) @@ -22868,9 +22864,9 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0")] fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; } let mut ret_val: int16x8x2_t = _vld1q_s16_x2(a); @@ -22904,9 +22900,9 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0")] fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; } _vld1q_s16_x3(a) @@ -22937,9 +22933,9 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0")] fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; } let mut ret_val: int16x8x3_t = _vld1q_s16_x3(a); @@ -22974,9 +22970,9 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0")] fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; } _vld1q_s16_x4(a) @@ -23007,9 +23003,9 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0")] fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; } let mut ret_val: int16x8x4_t = _vld1q_s16_x4(a); @@ -23045,9 +23041,9 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0")] fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; } _vld1_s32_x2(a) @@ -23078,9 +23074,9 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0")] fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; } let mut ret_val: int32x2x2_t = _vld1_s32_x2(a); @@ -23114,9 +23110,9 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0")] fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; } _vld1_s32_x3(a) @@ -23147,9 +23143,9 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0")] fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; } let mut ret_val: int32x2x3_t = _vld1_s32_x3(a); @@ -23184,9 +23180,9 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0")] fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; } _vld1_s32_x4(a) @@ -23217,9 +23213,9 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0")] fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; } let mut ret_val: int32x2x4_t = _vld1_s32_x4(a); @@ -23255,9 +23251,9 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0")] fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; } _vld1q_s32_x2(a) @@ -23288,9 +23284,9 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0")] fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; } let mut ret_val: int32x4x2_t = _vld1q_s32_x2(a); @@ -23324,9 +23320,9 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0")] fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; } _vld1q_s32_x3(a) @@ -23357,9 +23353,9 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0")] fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; } let mut ret_val: int32x4x3_t = _vld1q_s32_x3(a); @@ -23394,9 +23390,9 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0")] fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; } _vld1q_s32_x4(a) @@ -23427,9 +23423,9 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0")] fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; } let mut ret_val: int32x4x4_t = _vld1q_s32_x4(a); @@ -23464,9 +23460,9 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0")] fn _vld1_s64_x2(a: *const i64) -> int64x1x2_t; } _vld1_s64_x2(a) @@ -23496,9 +23492,9 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0")] fn _vld1_s64_x3(a: *const i64) -> int64x1x3_t; } _vld1_s64_x3(a) @@ -23528,9 +23524,9 @@ pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0")] fn _vld1_s64_x4(a: *const i64) -> int64x1x4_t; } _vld1_s64_x4(a) @@ -23561,9 +23557,9 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0")] fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; } _vld1q_s64_x2(a) @@ -23594,9 +23590,9 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0")] fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; } let mut ret_val: int64x2x2_t = _vld1q_s64_x2(a); @@ -23630,9 +23626,9 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0")] fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; } _vld1q_s64_x3(a) @@ -23663,9 +23659,9 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0")] fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; } let mut ret_val: int64x2x3_t = _vld1q_s64_x3(a); @@ -23700,9 +23696,9 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0")] fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; } _vld1q_s64_x4(a) @@ -23733,9 +23729,9 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64" + link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0")] fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; } let mut ret_val: int64x2x4_t = _vld1q_s64_x4(a); @@ -25677,9 +25673,8 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v1i64(a: *const i8, b: i32) -> int64x1_t { extern "unadjusted" { @@ -25695,9 +25690,8 @@ unsafe fn vld1_v1i64(a: *const i8, b: i32) -> int64x1_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { extern "unadjusted" { @@ -25713,9 +25707,8 @@ unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { extern "unadjusted" { @@ -25732,9 +25725,8 @@ unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { extern "unadjusted" { @@ -25750,9 +25742,8 @@ unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { extern "unadjusted" { @@ -25769,9 +25760,8 @@ unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { extern "unadjusted" { @@ -25787,9 +25777,8 @@ unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { extern "unadjusted" { @@ -25806,9 +25795,8 @@ unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { extern "unadjusted" { @@ -25824,9 +25812,8 @@ unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { extern "unadjusted" { @@ -25843,9 +25830,8 @@ unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { extern "unadjusted" { @@ -25861,9 +25847,8 @@ unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { extern "unadjusted" { @@ -25884,9 +25869,8 @@ unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { extern "unadjusted" { @@ -25902,9 +25886,8 @@ unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { extern "unadjusted" { @@ -25921,9 +25904,8 @@ unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { extern "unadjusted" { @@ -25939,9 +25921,8 @@ unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { extern "unadjusted" { @@ -25958,9 +25939,8 @@ unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { extern "unadjusted" { @@ -25976,9 +25956,8 @@ unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { extern "unadjusted" { @@ -25995,9 +25974,8 @@ unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { extern "unadjusted" { @@ -26013,9 +25991,8 @@ unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { extern "unadjusted" { @@ -26038,7 +26015,7 @@ unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } _vld2_dup_f32(a as *const i8, 4) @@ -26056,7 +26033,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } let mut ret_val: float32x2x2_t = _vld2_dup_f32(a as *const i8, 4); @@ -26077,7 +26054,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } _vld2q_dup_f32(a as *const i8, 4) @@ -26095,7 +26072,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } let mut ret_val: float32x4x2_t = _vld2q_dup_f32(a as *const i8, 4); @@ -26116,7 +26093,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } _vld2_dup_s8(a as *const i8, 1) @@ -26134,7 +26111,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } let mut ret_val: int8x8x2_t = _vld2_dup_s8(a as *const i8, 1); @@ -26155,7 +26132,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } _vld2q_dup_s8(a as *const i8, 1) @@ -26173,7 +26150,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } let mut ret_val: int8x16x2_t = _vld2q_dup_s8(a as *const i8, 1); @@ -26202,7 +26179,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } _vld2_dup_s16(a as *const i8, 2) @@ -26220,7 +26197,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } let mut ret_val: int16x4x2_t = _vld2_dup_s16(a as *const i8, 2); @@ -26241,7 +26218,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } _vld2q_dup_s16(a as *const i8, 2) @@ -26259,7 +26236,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } let mut ret_val: int16x8x2_t = _vld2q_dup_s16(a as *const i8, 2); @@ -26280,7 +26257,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } _vld2_dup_s32(a as *const i8, 4) @@ -26298,7 +26275,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } let mut ret_val: int32x2x2_t = _vld2_dup_s32(a as *const i8, 4); @@ -26319,7 +26296,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } _vld2q_dup_s32(a as *const i8, 4) @@ -26337,7 +26314,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } let mut ret_val: int32x4x2_t = _vld2q_dup_s32(a as *const i8, 4); @@ -26360,7 +26337,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" )] fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; } @@ -26381,7 +26358,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" )] fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; } @@ -26405,7 +26382,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" )] fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; } @@ -26426,7 +26403,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" )] fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; } @@ -26450,7 +26427,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" )] fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; } @@ -26471,7 +26448,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" )] fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; } @@ -26495,7 +26472,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" )] fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; } @@ -26516,7 +26493,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" )] fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; } @@ -26548,7 +26525,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" )] fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; } @@ -26569,7 +26546,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" )] fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; } @@ -26593,7 +26570,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" )] fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; } @@ -26614,7 +26591,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" )] fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; } @@ -26638,7 +26615,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" )] fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; } @@ -26659,7 +26636,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" )] fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; } @@ -26683,7 +26660,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" )] fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; } @@ -26704,7 +26681,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" )] fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; } @@ -26749,7 +26726,7 @@ pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0")] fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } _vld2_dup_s64(a as *const i8, 8) @@ -26768,7 +26745,7 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v1i64.p0i64" + link_name = "llvm.aarch64.neon.ld2r.v1i64.p0" )] fn _vld2_dup_s64(ptr: *const i64) -> int64x1x2_t; } @@ -27357,7 +27334,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } _vld2_f32(a as *const i8, 4) @@ -27375,7 +27352,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } let mut ret_val: float32x2x2_t = _vld2_f32(a as *const i8, 4); @@ -27396,7 +27373,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } _vld2q_f32(a as *const i8, 4) @@ -27414,7 +27391,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } let mut ret_val: float32x4x2_t = _vld2q_f32(a as *const i8, 4); @@ -27435,7 +27412,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } _vld2_s8(a as *const i8, 1) @@ -27453,7 +27430,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } let mut ret_val: int8x8x2_t = _vld2_s8(a as *const i8, 1); @@ -27474,7 +27451,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } _vld2q_s8(a as *const i8, 1) @@ -27492,7 +27469,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } let mut ret_val: int8x16x2_t = _vld2q_s8(a as *const i8, 1); @@ -27521,7 +27498,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } _vld2_s16(a as *const i8, 2) @@ -27539,7 +27516,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } let mut ret_val: int16x4x2_t = _vld2_s16(a as *const i8, 2); @@ -27560,7 +27537,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } _vld2q_s16(a as *const i8, 2) @@ -27578,7 +27555,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } let mut ret_val: int16x8x2_t = _vld2q_s16(a as *const i8, 2); @@ -27599,7 +27576,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } _vld2_s32(a as *const i8, 4) @@ -27617,7 +27594,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } let mut ret_val: int32x2x2_t = _vld2_s32(a as *const i8, 4); @@ -27638,7 +27615,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } _vld2q_s32(a as *const i8, 4) @@ -27656,7 +27633,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } let mut ret_val: int32x4x2_t = _vld2q_s32(a as *const i8, 4); @@ -27679,7 +27656,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f32.p0v2f32" + link_name = "llvm.aarch64.neon.ld2.v2f32.p0" )] fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; } @@ -27700,7 +27677,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f32.p0v2f32" + link_name = "llvm.aarch64.neon.ld2.v2f32.p0" )] fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; } @@ -27724,7 +27701,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4f32.p0v4f32" + link_name = "llvm.aarch64.neon.ld2.v4f32.p0" )] fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; } @@ -27745,7 +27722,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4f32.p0v4f32" + link_name = "llvm.aarch64.neon.ld2.v4f32.p0" )] fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; } @@ -27769,7 +27746,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i8.p0v8i8" + link_name = "llvm.aarch64.neon.ld2.v8i8.p0" )] fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; } @@ -27790,7 +27767,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i8.p0v8i8" + link_name = "llvm.aarch64.neon.ld2.v8i8.p0" )] fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; } @@ -27814,7 +27791,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v16i8.p0v16i8" + link_name = "llvm.aarch64.neon.ld2.v16i8.p0" )] fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; } @@ -27835,7 +27812,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v16i8.p0v16i8" + link_name = "llvm.aarch64.neon.ld2.v16i8.p0" )] fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; } @@ -27867,7 +27844,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i16.p0v4i16" + link_name = "llvm.aarch64.neon.ld2.v4i16.p0" )] fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; } @@ -27888,7 +27865,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i16.p0v4i16" + link_name = "llvm.aarch64.neon.ld2.v4i16.p0" )] fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; } @@ -27912,7 +27889,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i16.p0v8i16" + link_name = "llvm.aarch64.neon.ld2.v8i16.p0" )] fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; } @@ -27933,7 +27910,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i16.p0v8i16" + link_name = "llvm.aarch64.neon.ld2.v8i16.p0" )] fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; } @@ -27957,7 +27934,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i32.p0v2i32" + link_name = "llvm.aarch64.neon.ld2.v2i32.p0" )] fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; } @@ -27978,7 +27955,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i32.p0v2i32" + link_name = "llvm.aarch64.neon.ld2.v2i32.p0" )] fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; } @@ -28002,7 +27979,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i32.p0v4i32" + link_name = "llvm.aarch64.neon.ld2.v4i32.p0" )] fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; } @@ -28023,7 +28000,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i32.p0v4i32" + link_name = "llvm.aarch64.neon.ld2.v4i32.p0" )] fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; } @@ -28049,7 +28026,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0" )] fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; } @@ -28072,7 +28049,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0" )] fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; } @@ -28101,7 +28078,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" )] fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) -> float32x4x2_t; @@ -28125,7 +28102,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" )] fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) -> float32x4x2_t; @@ -28155,7 +28132,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" )] fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; } @@ -28178,7 +28155,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" )] fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; } @@ -28207,7 +28184,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0" )] fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; } @@ -28230,7 +28207,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0" )] fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; } @@ -28259,7 +28236,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0" )] fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; } @@ -28282,7 +28259,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0" )] fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; } @@ -28311,7 +28288,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" )] fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; } @@ -28334,7 +28311,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" )] fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; } @@ -28363,7 +28340,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" )] fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; } @@ -28386,7 +28363,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" )] fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; } @@ -28413,7 +28390,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] fn _vld2_lane_f32( ptr: *const i8, a: float32x2_t, @@ -28439,7 +28416,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] fn _vld2_lane_f32( ptr: *const i8, a: float32x2_t, @@ -28471,7 +28448,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] fn _vld2q_lane_f32( ptr: *const i8, a: float32x4_t, @@ -28497,7 +28474,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] fn _vld2q_lane_f32( ptr: *const i8, a: float32x4_t, @@ -28529,7 +28506,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] fn _vld2q_lane_s16( ptr: *const i8, a: int16x8_t, @@ -28555,7 +28532,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] fn _vld2q_lane_s16( ptr: *const i8, a: int16x8_t, @@ -28587,7 +28564,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] fn _vld2q_lane_s32( ptr: *const i8, a: int32x4_t, @@ -28613,7 +28590,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] fn _vld2q_lane_s32( ptr: *const i8, a: int32x4_t, @@ -28645,7 +28622,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) -> int8x8x2_t; } @@ -28666,7 +28643,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) -> int8x8x2_t; } @@ -28693,7 +28670,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] fn _vld2_lane_s16( ptr: *const i8, a: int16x4_t, @@ -28719,7 +28696,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] fn _vld2_lane_s16( ptr: *const i8, a: int16x4_t, @@ -28751,7 +28728,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] fn _vld2_lane_s32( ptr: *const i8, a: int32x2_t, @@ -28777,7 +28754,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] fn _vld2_lane_s32( ptr: *const i8, a: int32x2_t, @@ -29310,7 +29287,7 @@ pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64")] fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } _vld2_s64(a as *const i8, 8) @@ -29329,7 +29306,7 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v1i64.p0v1i64" + link_name = "llvm.aarch64.neon.ld2.v1i64.p0" )] fn _vld2_s64(ptr: *const int64x1_t) -> int64x1x2_t; } @@ -29920,7 +29897,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" )] fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; } @@ -29941,7 +29918,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" )] fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; } @@ -29966,7 +29943,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" )] fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; } @@ -29987,7 +29964,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" )] fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; } @@ -30012,7 +29989,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" )] fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; } @@ -30033,7 +30010,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" )] fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; } @@ -30058,7 +30035,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" )] fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; } @@ -30079,7 +30056,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" )] fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; } @@ -30116,7 +30093,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" )] fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; } @@ -30137,7 +30114,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" )] fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; } @@ -30162,7 +30139,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" )] fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; } @@ -30183,7 +30160,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" )] fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; } @@ -30208,7 +30185,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" )] fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; } @@ -30229,7 +30206,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" )] fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; } @@ -30254,7 +30231,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" )] fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; } @@ -30275,7 +30252,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" )] fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; } @@ -30299,7 +30276,7 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v1i64.p0i64" + link_name = "llvm.aarch64.neon.ld3r.v1i64.p0" )] fn _vld3_dup_s64(ptr: *const i64) -> int64x1x3_t; } @@ -30318,7 +30295,7 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } _vld3_dup_f32(a as *const i8, 4) @@ -30336,7 +30313,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } let mut ret_val: float32x2x3_t = _vld3_dup_f32(a as *const i8, 4); @@ -30358,7 +30335,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } _vld3q_dup_f32(a as *const i8, 4) @@ -30376,7 +30353,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } let mut ret_val: float32x4x3_t = _vld3q_dup_f32(a as *const i8, 4); @@ -30398,7 +30375,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } _vld3_dup_s8(a as *const i8, 1) @@ -30416,7 +30393,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } let mut ret_val: int8x8x3_t = _vld3_dup_s8(a as *const i8, 1); @@ -30438,7 +30415,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } _vld3q_dup_s8(a as *const i8, 1) @@ -30456,7 +30433,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } let mut ret_val: int8x16x3_t = _vld3q_dup_s8(a as *const i8, 1); @@ -30490,7 +30467,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } _vld3_dup_s16(a as *const i8, 2) @@ -30508,7 +30485,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } let mut ret_val: int16x4x3_t = _vld3_dup_s16(a as *const i8, 2); @@ -30530,7 +30507,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } _vld3q_dup_s16(a as *const i8, 2) @@ -30548,7 +30525,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } let mut ret_val: int16x8x3_t = _vld3q_dup_s16(a as *const i8, 2); @@ -30570,7 +30547,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } _vld3_dup_s32(a as *const i8, 4) @@ -30588,7 +30565,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } let mut ret_val: int32x2x3_t = _vld3_dup_s32(a as *const i8, 4); @@ -30610,7 +30587,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } _vld3q_dup_s32(a as *const i8, 4) @@ -30628,7 +30605,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } let mut ret_val: int32x4x3_t = _vld3q_dup_s32(a as *const i8, 4); @@ -30673,7 +30650,7 @@ pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0")] fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } _vld3_dup_s64(a as *const i8, 8) @@ -31281,7 +31258,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f32.p0v2f32" + link_name = "llvm.aarch64.neon.ld3.v2f32.p0" )] fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; } @@ -31302,7 +31279,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f32.p0v2f32" + link_name = "llvm.aarch64.neon.ld3.v2f32.p0" )] fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; } @@ -31327,7 +31304,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4f32.p0v4f32" + link_name = "llvm.aarch64.neon.ld3.v4f32.p0" )] fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; } @@ -31348,7 +31325,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4f32.p0v4f32" + link_name = "llvm.aarch64.neon.ld3.v4f32.p0" )] fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; } @@ -31373,7 +31350,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i8.p0v8i8" + link_name = "llvm.aarch64.neon.ld3.v8i8.p0" )] fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; } @@ -31394,7 +31371,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i8.p0v8i8" + link_name = "llvm.aarch64.neon.ld3.v8i8.p0" )] fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; } @@ -31419,7 +31396,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v16i8.p0v16i8" + link_name = "llvm.aarch64.neon.ld3.v16i8.p0" )] fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; } @@ -31440,7 +31417,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v16i8.p0v16i8" + link_name = "llvm.aarch64.neon.ld3.v16i8.p0" )] fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; } @@ -31477,7 +31454,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i16.p0v4i16" + link_name = "llvm.aarch64.neon.ld3.v4i16.p0" )] fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; } @@ -31498,7 +31475,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i16.p0v4i16" + link_name = "llvm.aarch64.neon.ld3.v4i16.p0" )] fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; } @@ -31523,7 +31500,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i16.p0v8i16" + link_name = "llvm.aarch64.neon.ld3.v8i16.p0" )] fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; } @@ -31544,7 +31521,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i16.p0v8i16" + link_name = "llvm.aarch64.neon.ld3.v8i16.p0" )] fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; } @@ -31569,7 +31546,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i32.p0v2i32" + link_name = "llvm.aarch64.neon.ld3.v2i32.p0" )] fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; } @@ -31590,7 +31567,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i32.p0v2i32" + link_name = "llvm.aarch64.neon.ld3.v2i32.p0" )] fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; } @@ -31615,7 +31592,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i32.p0v4i32" + link_name = "llvm.aarch64.neon.ld3.v4i32.p0" )] fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; } @@ -31636,7 +31613,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i32.p0v4i32" + link_name = "llvm.aarch64.neon.ld3.v4i32.p0" )] fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; } @@ -31659,7 +31636,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } _vld3_f32(a as *const i8, 4) @@ -31677,7 +31654,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } let mut ret_val: float32x2x3_t = _vld3_f32(a as *const i8, 4); @@ -31699,7 +31676,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } _vld3q_f32(a as *const i8, 4) @@ -31717,7 +31694,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } let mut ret_val: float32x4x3_t = _vld3q_f32(a as *const i8, 4); @@ -31739,7 +31716,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } _vld3_s8(a as *const i8, 1) @@ -31757,7 +31734,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } let mut ret_val: int8x8x3_t = _vld3_s8(a as *const i8, 1); @@ -31779,7 +31756,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } _vld3q_s8(a as *const i8, 1) @@ -31797,7 +31774,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } let mut ret_val: int8x16x3_t = _vld3q_s8(a as *const i8, 1); @@ -31831,7 +31808,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } _vld3_s16(a as *const i8, 2) @@ -31849,7 +31826,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } let mut ret_val: int16x4x3_t = _vld3_s16(a as *const i8, 2); @@ -31871,7 +31848,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } _vld3q_s16(a as *const i8, 2) @@ -31889,7 +31866,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } let mut ret_val: int16x8x3_t = _vld3q_s16(a as *const i8, 2); @@ -31911,7 +31888,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } _vld3_s32(a as *const i8, 4) @@ -31929,7 +31906,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } let mut ret_val: int32x2x3_t = _vld3_s32(a as *const i8, 4); @@ -31951,7 +31928,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } _vld3q_s32(a as *const i8, 4) @@ -31969,7 +31946,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } let mut ret_val: int32x4x3_t = _vld3q_s32(a as *const i8, 4); @@ -31995,7 +31972,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" )] fn _vld3_lane_f32( a: float32x2_t, @@ -32024,7 +32001,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" )] fn _vld3_lane_f32( a: float32x2_t, @@ -32061,7 +32038,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" )] fn _vld3q_lane_f32( a: float32x4_t, @@ -32090,7 +32067,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" )] fn _vld3q_lane_f32( a: float32x4_t, @@ -32125,7 +32102,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] fn _vld3_lane_f32( ptr: *const i8, a: float32x2_t, @@ -32152,7 +32129,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] fn _vld3_lane_f32( ptr: *const i8, a: float32x2_t, @@ -32189,7 +32166,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" )] fn _vld3_lane_s8( a: int8x8_t, @@ -32218,7 +32195,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" )] fn _vld3_lane_s8( a: int8x8_t, @@ -32255,7 +32232,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" )] fn _vld3_lane_s16( a: int16x4_t, @@ -32284,7 +32261,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" )] fn _vld3_lane_s16( a: int16x4_t, @@ -32321,7 +32298,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" )] fn _vld3q_lane_s16( a: int16x8_t, @@ -32350,7 +32327,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" )] fn _vld3q_lane_s16( a: int16x8_t, @@ -32387,7 +32364,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" )] fn _vld3_lane_s32( a: int32x2_t, @@ -32416,7 +32393,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" )] fn _vld3_lane_s32( a: int32x2_t, @@ -32453,7 +32430,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" )] fn _vld3q_lane_s32( a: int32x4_t, @@ -32482,7 +32459,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" )] fn _vld3q_lane_s32( a: int32x4_t, @@ -32517,7 +32494,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] fn _vld3_lane_s8( ptr: *const i8, a: int8x8_t, @@ -32544,7 +32521,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] fn _vld3_lane_s8( ptr: *const i8, a: int8x8_t, @@ -32579,7 +32556,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] fn _vld3_lane_s16( ptr: *const i8, a: int16x4_t, @@ -32606,7 +32583,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] fn _vld3_lane_s16( ptr: *const i8, a: int16x4_t, @@ -32641,7 +32618,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] fn _vld3q_lane_s16( ptr: *const i8, a: int16x8_t, @@ -32668,7 +32645,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] fn _vld3q_lane_s16( ptr: *const i8, a: int16x8_t, @@ -32703,7 +32680,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] fn _vld3_lane_s32( ptr: *const i8, a: int32x2_t, @@ -32730,7 +32707,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] fn _vld3_lane_s32( ptr: *const i8, a: int32x2_t, @@ -32765,7 +32742,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] fn _vld3q_lane_s32( ptr: *const i8, a: int32x4_t, @@ -32792,7 +32769,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] fn _vld3q_lane_s32( ptr: *const i8, a: int32x4_t, @@ -33346,7 +33323,7 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1i64.p0v1i64" + link_name = "llvm.aarch64.neon.ld3.v1i64.p0" )] fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; } @@ -33364,7 +33341,7 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } _vld3_s64(a as *const i8, 8) @@ -33972,7 +33949,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] fn _vld3q_lane_f32( ptr: *const i8, a: float32x4_t, @@ -33999,7 +33976,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] fn _vld3q_lane_f32( ptr: *const i8, a: float32x4_t, @@ -34032,7 +34009,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } _vld4_dup_f32(a as *const i8, 4) @@ -34050,7 +34027,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } let mut ret_val: float32x2x4_t = _vld4_dup_f32(a as *const i8, 4); @@ -34073,7 +34050,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } _vld4q_dup_f32(a as *const i8, 4) @@ -34091,7 +34068,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } let mut ret_val: float32x4x4_t = _vld4q_dup_f32(a as *const i8, 4); @@ -34114,7 +34091,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } _vld4_dup_s8(a as *const i8, 1) @@ -34132,7 +34109,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } let mut ret_val: int8x8x4_t = _vld4_dup_s8(a as *const i8, 1); @@ -34155,7 +34132,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } _vld4q_dup_s8(a as *const i8, 1) @@ -34173,7 +34150,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } let mut ret_val: int8x16x4_t = _vld4q_dup_s8(a as *const i8, 1); @@ -34212,7 +34189,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } _vld4_dup_s16(a as *const i8, 2) @@ -34230,7 +34207,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } let mut ret_val: int16x4x4_t = _vld4_dup_s16(a as *const i8, 2); @@ -34253,7 +34230,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } _vld4q_dup_s16(a as *const i8, 2) @@ -34271,7 +34248,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } let mut ret_val: int16x8x4_t = _vld4q_dup_s16(a as *const i8, 2); @@ -34294,7 +34271,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } _vld4_dup_s32(a as *const i8, 4) @@ -34312,7 +34289,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } let mut ret_val: int32x2x4_t = _vld4_dup_s32(a as *const i8, 4); @@ -34335,7 +34312,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } _vld4q_dup_s32(a as *const i8, 4) @@ -34353,7 +34330,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } let mut ret_val: int32x4x4_t = _vld4q_dup_s32(a as *const i8, 4); @@ -34378,7 +34355,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" )] fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; } @@ -34399,7 +34376,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32" + link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" )] fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; } @@ -34425,7 +34402,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" )] fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; } @@ -34446,7 +34423,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32" + link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" )] fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; } @@ -34472,7 +34449,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" )] fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; } @@ -34493,7 +34470,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" )] fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; } @@ -34519,7 +34496,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" )] fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; } @@ -34540,7 +34517,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8" + link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" )] fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; } @@ -34582,7 +34559,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" )] fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; } @@ -34603,7 +34580,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16" + link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" )] fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; } @@ -34629,7 +34606,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" )] fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; } @@ -34650,7 +34627,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16" + link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" )] fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; } @@ -34676,7 +34653,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" )] fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; } @@ -34697,7 +34674,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32" + link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" )] fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; } @@ -34723,7 +34700,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" )] fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; } @@ -34744,7 +34721,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32" + link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" )] fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; } @@ -34769,7 +34746,7 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64" + link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64.p0" )] fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; } @@ -34811,7 +34788,7 @@ pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } _vld4_dup_s64(a as *const i8, 8) @@ -35437,7 +35414,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f32.p0v2f32" + link_name = "llvm.aarch64.neon.ld4.v2f32.p0" )] fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; } @@ -35458,7 +35435,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f32.p0v2f32" + link_name = "llvm.aarch64.neon.ld4.v2f32.p0" )] fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; } @@ -35484,7 +35461,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f32.p0v4f32" + link_name = "llvm.aarch64.neon.ld4.v4f32.p0" )] fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; } @@ -35505,7 +35482,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f32.p0v4f32" + link_name = "llvm.aarch64.neon.ld4.v4f32.p0" )] fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; } @@ -35531,7 +35508,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i8.p0v8i8" + link_name = "llvm.aarch64.neon.ld4.v8i8.p0" )] fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; } @@ -35552,7 +35529,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i8.p0v8i8" + link_name = "llvm.aarch64.neon.ld4.v8i8.p0" )] fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; } @@ -35578,7 +35555,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v16i8.p0v16i8" + link_name = "llvm.aarch64.neon.ld4.v16i8.p0" )] fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; } @@ -35599,7 +35576,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v16i8.p0v16i8" + link_name = "llvm.aarch64.neon.ld4.v16i8.p0" )] fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; } @@ -35641,7 +35618,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i16.p0v4i16" + link_name = "llvm.aarch64.neon.ld4.v4i16.p0" )] fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; } @@ -35662,7 +35639,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i16.p0v4i16" + link_name = "llvm.aarch64.neon.ld4.v4i16.p0" )] fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; } @@ -35688,7 +35665,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i16.p0v8i16" + link_name = "llvm.aarch64.neon.ld4.v8i16.p0" )] fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; } @@ -35709,7 +35686,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i16.p0v8i16" + link_name = "llvm.aarch64.neon.ld4.v8i16.p0" )] fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; } @@ -35735,7 +35712,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i32.p0v2i32" + link_name = "llvm.aarch64.neon.ld4.v2i32.p0" )] fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; } @@ -35756,7 +35733,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i32.p0v2i32" + link_name = "llvm.aarch64.neon.ld4.v2i32.p0" )] fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; } @@ -35782,7 +35759,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i32.p0v4i32" + link_name = "llvm.aarch64.neon.ld4.v4i32.p0" )] fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; } @@ -35803,7 +35780,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i32.p0v4i32" + link_name = "llvm.aarch64.neon.ld4.v4i32.p0" )] fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; } @@ -35827,7 +35804,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } _vld4_f32(a as *const i8, 4) @@ -35845,7 +35822,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } let mut ret_val: float32x2x4_t = _vld4_f32(a as *const i8, 4); @@ -35868,7 +35845,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } _vld4q_f32(a as *const i8, 4) @@ -35886,7 +35863,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } let mut ret_val: float32x4x4_t = _vld4q_f32(a as *const i8, 4); @@ -35909,7 +35886,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } _vld4_s8(a as *const i8, 1) @@ -35927,7 +35904,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } let mut ret_val: int8x8x4_t = _vld4_s8(a as *const i8, 1); @@ -35950,7 +35927,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } _vld4q_s8(a as *const i8, 1) @@ -35968,7 +35945,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } let mut ret_val: int8x16x4_t = _vld4q_s8(a as *const i8, 1); @@ -36007,7 +35984,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } _vld4_s16(a as *const i8, 2) @@ -36025,7 +36002,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } let mut ret_val: int16x4x4_t = _vld4_s16(a as *const i8, 2); @@ -36048,7 +36025,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } _vld4q_s16(a as *const i8, 2) @@ -36066,7 +36043,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } let mut ret_val: int16x8x4_t = _vld4q_s16(a as *const i8, 2); @@ -36089,7 +36066,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } _vld4_s32(a as *const i8, 4) @@ -36107,7 +36084,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } let mut ret_val: int32x2x4_t = _vld4_s32(a as *const i8, 4); @@ -36130,7 +36107,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } _vld4q_s32(a as *const i8, 4) @@ -36148,7 +36125,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } let mut ret_val: int32x4x4_t = _vld4q_s32(a as *const i8, 4); @@ -36175,7 +36152,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" )] fn _vld4_lane_f32( a: float32x2_t, @@ -36205,7 +36182,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" )] fn _vld4_lane_f32( a: float32x2_t, @@ -36245,7 +36222,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" )] fn _vld4q_lane_f32( a: float32x4_t, @@ -36275,7 +36252,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" )] fn _vld4q_lane_f32( a: float32x4_t, @@ -36315,7 +36292,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" )] fn _vld4_lane_s8( a: int8x8_t, @@ -36345,7 +36322,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" )] fn _vld4_lane_s8( a: int8x8_t, @@ -36385,7 +36362,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" )] fn _vld4_lane_s16( a: int16x4_t, @@ -36415,7 +36392,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" )] fn _vld4_lane_s16( a: int16x4_t, @@ -36455,7 +36432,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" )] fn _vld4q_lane_s16( a: int16x8_t, @@ -36485,7 +36462,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" )] fn _vld4q_lane_s16( a: int16x8_t, @@ -36525,7 +36502,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" )] fn _vld4_lane_s32( a: int32x2_t, @@ -36555,7 +36532,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" )] fn _vld4_lane_s32( a: int32x2_t, @@ -36595,7 +36572,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" )] fn _vld4q_lane_s32( a: int32x4_t, @@ -36625,7 +36602,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" )] fn _vld4q_lane_s32( a: int32x4_t, @@ -36663,7 +36640,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] fn _vld4_lane_f32( ptr: *const i8, a: float32x2_t, @@ -36691,7 +36668,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] fn _vld4_lane_f32( ptr: *const i8, a: float32x2_t, @@ -36729,7 +36706,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] fn _vld4q_lane_f32( ptr: *const i8, a: float32x4_t, @@ -36757,7 +36734,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] fn _vld4q_lane_f32( ptr: *const i8, a: float32x4_t, @@ -36795,7 +36772,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] fn _vld4_lane_s8( ptr: *const i8, a: int8x8_t, @@ -36823,7 +36800,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] fn _vld4_lane_s8( ptr: *const i8, a: int8x8_t, @@ -36861,7 +36838,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] fn _vld4_lane_s16( ptr: *const i8, a: int16x4_t, @@ -36889,7 +36866,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] fn _vld4_lane_s16( ptr: *const i8, a: int16x4_t, @@ -36927,7 +36904,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] fn _vld4q_lane_s16( ptr: *const i8, a: int16x8_t, @@ -36955,7 +36932,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] fn _vld4q_lane_s16( ptr: *const i8, a: int16x8_t, @@ -36993,7 +36970,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] fn _vld4_lane_s32( ptr: *const i8, a: int32x2_t, @@ -37021,7 +36998,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] fn _vld4_lane_s32( ptr: *const i8, a: int32x2_t, @@ -37059,7 +37036,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] fn _vld4q_lane_s32( ptr: *const i8, a: int32x4_t, @@ -37087,7 +37064,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] fn _vld4q_lane_s32( ptr: *const i8, a: int32x4_t, @@ -37660,7 +37637,7 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1i64.p0v1i64" + link_name = "llvm.aarch64.neon.ld4.v1i64.p0" )] fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; } @@ -37678,7 +37655,7 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } _vld4_s64(a as *const i8, 8) @@ -51960,9 +51937,9 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull.v8i8" + link_name = "llvm.aarch64.neon.pmull.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i16")] fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; } _vmull_p8(a, b) @@ -51993,9 +51970,9 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull.v8i8" + link_name = "llvm.aarch64.neon.pmull.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i16")] fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; } let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -54580,7 +54557,7 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } _vpadd_s8(a, b) @@ -54613,7 +54590,7 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -54649,7 +54626,7 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } _vpadd_s16(a, b) @@ -54682,7 +54659,7 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -54718,7 +54695,7 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } _vpadd_s32(a, b) @@ -54751,7 +54728,7 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vaddp.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); @@ -54927,7 +54904,7 @@ pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -54960,7 +54937,7 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -54995,7 +54972,7 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55028,7 +55005,7 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55063,7 +55040,7 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55096,7 +55073,7 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55131,7 +55108,7 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55164,7 +55141,7 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55199,7 +55176,7 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55232,7 +55209,7 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55266,7 +55243,7 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55299,7 +55276,7 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(saddlp) @@ -55334,7 +55311,7 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55367,7 +55344,7 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55402,7 +55379,7 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55435,7 +55412,7 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55470,7 +55447,7 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55503,7 +55480,7 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55538,7 +55515,7 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55571,7 +55548,7 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55606,7 +55583,7 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55639,7 +55616,7 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55673,7 +55650,7 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55706,7 +55683,7 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uaddlp) @@ -55741,7 +55718,7 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fmaxp) @@ -55774,7 +55751,7 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fmaxp) @@ -56017,7 +55994,7 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(umaxp) @@ -56050,7 +56027,7 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(umaxp) @@ -56086,7 +56063,7 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(umaxp) @@ -56119,7 +56096,7 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(umaxp) @@ -56155,7 +56132,7 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(umaxp) @@ -56188,7 +56165,7 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmaxu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(umaxp) @@ -56224,7 +56201,7 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmins))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fminp) @@ -56257,7 +56234,7 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmins))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fminp) @@ -56500,7 +56477,7 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uminp) @@ -56533,7 +56510,7 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uminp) @@ -56569,7 +56546,7 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uminp) @@ -56602,7 +56579,7 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uminp) @@ -56638,7 +56615,7 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uminp) @@ -56671,7 +56648,7 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpminu))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uminp) @@ -67505,7 +67482,7 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } _vqsub_s8(a, b) @@ -67538,7 +67515,7 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -67574,7 +67551,7 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } _vqsubq_s8(a, b) @@ -67607,7 +67584,7 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); @@ -67647,7 +67624,7 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } _vqsub_s16(a, b) @@ -67680,7 +67657,7 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -67716,7 +67693,7 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } _vqsubq_s16(a, b) @@ -67749,7 +67726,7 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -67785,7 +67762,7 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } _vqsub_s32(a, b) @@ -67818,7 +67795,7 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); @@ -67854,7 +67831,7 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } _vqsubq_s32(a, b) @@ -67887,7 +67864,7 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -67922,7 +67899,7 @@ pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v1i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.1i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")] fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; } _vqsub_s64(a, b) @@ -67955,7 +67932,7 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } _vqsubq_s64(a, b) @@ -67988,7 +67965,7 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); @@ -68024,7 +68001,7 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned() @@ -68057,7 +68034,7 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -68093,7 +68070,7 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() @@ -68126,7 +68103,7 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); @@ -68166,7 +68143,7 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned() @@ -68199,7 +68176,7 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -68235,7 +68212,7 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() @@ -68268,7 +68245,7 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -68304,7 +68281,7 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned() @@ -68337,7 +68314,7 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); @@ -68373,7 +68350,7 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() @@ -68406,7 +68383,7 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -68441,7 +68418,7 @@ pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v1i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.1i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] fn _vqsub_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; } _vqsub_u64(a.as_signed(), b.as_signed()).as_unsigned() @@ -68474,7 +68451,7 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned() @@ -68507,7 +68484,7 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); @@ -68887,9 +68864,9 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v8i16" + link_name = "llvm.aarch64.neon.raddhn.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; } _vraddhn_s16(a, b) @@ -68920,9 +68897,9 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v8i16" + link_name = "llvm.aarch64.neon.raddhn.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; } let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -68956,9 +68933,9 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v4i32" + link_name = "llvm.aarch64.neon.raddhn.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; } _vraddhn_s32(a, b) @@ -68989,9 +68966,9 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v4i32" + link_name = "llvm.aarch64.neon.raddhn.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; } let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -69025,9 +69002,9 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v2i64" + link_name = "llvm.aarch64.neon.raddhn.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; } _vraddhn_s64(a, b) @@ -69058,9 +69035,9 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v2i64" + link_name = "llvm.aarch64.neon.raddhn.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; } let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); @@ -90835,10 +90812,7 @@ pub unsafe fn vsha256su1q_u32( #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] @@ -90854,10 +90828,7 @@ unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] @@ -90880,10 +90851,7 @@ unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64"))] unsafe fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] @@ -90899,10 +90867,7 @@ unsafe fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] @@ -90918,10 +90883,7 @@ unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] @@ -90941,10 +90903,7 @@ unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64"))] unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] @@ -90960,10 +90919,7 @@ unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64"))] unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] @@ -90983,10 +90939,7 @@ unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] @@ -91002,10 +90955,7 @@ unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] @@ -91025,10 +90975,7 @@ unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] @@ -91044,10 +90991,7 @@ unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32"))] unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] @@ -91067,10 +91011,7 @@ unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] @@ -91086,10 +91027,7 @@ unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16"))] unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] @@ -91109,10 +91047,7 @@ unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] @@ -91128,10 +91063,7 @@ unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8"))] unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] @@ -94788,7 +94720,7 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); vshiftins_v2i32(a, b, int32x2_t::splat(N)) } @@ -94804,7 +94736,7 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); let ret_val: int32x2_t = vshiftins_v2i32(a, b, int32x2_t::splat(N)); @@ -94823,7 +94755,7 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); vshiftins_v4i32(a, b, int32x4_t::splat(N)) } @@ -94839,7 +94771,7 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); let ret_val: int32x4_t = vshiftins_v4i32(a, b, int32x4_t::splat(N)); @@ -94857,7 +94789,7 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(0 >= N && N <= 63); + static_assert!(N >= 0 && N <= 63); vshiftins_v1i64(a, b, int64x1_t::splat(N as i64)) } @@ -94873,7 +94805,7 @@ pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(0 >= N && N <= 63); + static_assert!(N >= 0 && N <= 63); vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)) } @@ -94889,7 +94821,7 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(0 >= N && N <= 63); + static_assert!(N >= 0 && N <= 63); let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); let ret_val: int64x2_t = vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)); @@ -95084,7 +95016,7 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); transmute(vshiftins_v2i32( transmute(a), transmute(b), @@ -95104,7 +95036,7 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); let ret_val: uint32x2_t = transmute(vshiftins_v2i32( @@ -95127,7 +95059,7 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); transmute(vshiftins_v4i32( transmute(a), transmute(b), @@ -95147,7 +95079,7 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(0 >= N && N <= 31); + static_assert!(N >= 0 && N <= 31); let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); let ret_val: uint32x4_t = transmute(vshiftins_v4i32( @@ -95169,7 +95101,7 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(0 >= N && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vshiftins_v1i64( transmute(a), transmute(b), @@ -95189,7 +95121,7 @@ pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(0 >= N && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vshiftins_v2i64( transmute(a), transmute(b), @@ -95209,7 +95141,7 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(0 >= N && N <= 63); + static_assert!(N >= 0 && N <= 63); let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); let ret_val: uint64x2_t = transmute(vshiftins_v2i64( @@ -95396,68 +95328,6 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(0 >= N && N <= 63); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(N as i64), - )) -} - -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(0 >= N && N <= 63); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(N as i64), - )) -} - -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(0 >= N && N <= 63); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(N as i64), - )); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] #[doc = "## Safety"] @@ -97080,68 +96950,6 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(-N as i64), - )) -} - -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(-N as i64), - )) -} - -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(1 <= N && N <= 64); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(-N as i64), - )); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] @@ -97967,7 +97775,7 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); } _vst1_f32_x2(a, b.0, b.1) @@ -97985,7 +97793,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); } let mut b: float32x2x2_t = b; @@ -98006,7 +97814,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); } _vst1q_f32_x2(a, b.0, b.1) @@ -98024,7 +97832,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); } let mut b: float32x4x2_t = b; @@ -98135,7 +97943,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v2f32.p0")] fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); } _vst1_f32_x3(a, b.0, b.1, b.2) @@ -98153,7 +97961,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v2f32.p0")] fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); } let mut b: float32x2x3_t = b; @@ -98175,7 +97983,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v4f32.p0")] fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); } _vst1q_f32_x3(a, b.0, b.1, b.2) @@ -98193,7 +98001,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v4f32.p0")] fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); } let mut b: float32x4x3_t = b; @@ -98307,7 +98115,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32.p0")] fn _vst1_f32_x4( ptr: *mut f32, a: float32x2_t, @@ -98331,7 +98139,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32.p0")] fn _vst1_f32_x4( ptr: *mut f32, a: float32x2_t, @@ -98360,7 +98168,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32.p0")] fn _vst1q_f32_x4( ptr: *mut f32, a: float32x4_t, @@ -98384,7 +98192,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32.p0")] fn _vst1q_f32_x4( ptr: *mut f32, a: float32x4_t, @@ -100286,7 +100094,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")] fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); } _vst1_s8_x2(a, b.0, b.1) @@ -100304,7 +100112,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")] fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); } let mut b: int8x8x2_t = b; @@ -100325,7 +100133,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")] fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); } _vst1q_s8_x2(a, b.0, b.1) @@ -100343,7 +100151,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")] fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); } let mut b: int8x16x2_t = b; @@ -100372,7 +100180,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")] fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); } _vst1_s16_x2(a, b.0, b.1) @@ -100390,7 +100198,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")] fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); } let mut b: int16x4x2_t = b; @@ -100411,7 +100219,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")] fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); } _vst1q_s16_x2(a, b.0, b.1) @@ -100429,7 +100237,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")] fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); } let mut b: int16x8x2_t = b; @@ -100450,7 +100258,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")] fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); } _vst1_s32_x2(a, b.0, b.1) @@ -100468,7 +100276,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")] fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); } let mut b: int32x2x2_t = b; @@ -100489,7 +100297,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")] fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); } _vst1q_s32_x2(a, b.0, b.1) @@ -100507,7 +100315,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")] fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); } let mut b: int32x4x2_t = b; @@ -100527,7 +100335,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v1i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v1i64.p0")] fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t); } _vst1_s64_x2(a, b.0, b.1) @@ -100545,7 +100353,7 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); } _vst1q_s64_x2(a, b.0, b.1) @@ -100563,7 +100371,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); } let mut b: int64x2x2_t = b; @@ -100938,7 +100746,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8.p0")] fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); } _vst1_s8_x3(a, b.0, b.1, b.2) @@ -100956,7 +100764,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8.p0")] fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); } let mut b: int8x8x3_t = b; @@ -100978,7 +100786,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8.p0")] fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); } _vst1q_s8_x3(a, b.0, b.1, b.2) @@ -100996,7 +100804,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8.p0")] fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); } let mut b: int8x16x3_t = b; @@ -101030,7 +100838,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16.p0")] fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); } _vst1_s16_x3(a, b.0, b.1, b.2) @@ -101048,7 +100856,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16.p0")] fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); } let mut b: int16x4x3_t = b; @@ -101070,7 +100878,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16.p0")] fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); } _vst1q_s16_x3(a, b.0, b.1, b.2) @@ -101088,7 +100896,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16.p0")] fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); } let mut b: int16x8x3_t = b; @@ -101110,7 +100918,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32.p0")] fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); } _vst1_s32_x3(a, b.0, b.1, b.2) @@ -101128,7 +100936,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32.p0")] fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); } let mut b: int32x2x3_t = b; @@ -101150,7 +100958,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32.p0")] fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); } _vst1q_s32_x3(a, b.0, b.1, b.2) @@ -101168,7 +100976,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32.p0")] fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); } let mut b: int32x4x3_t = b; @@ -101189,7 +100997,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64.p0")] fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); } _vst1_s64_x3(a, b.0, b.1, b.2) @@ -101207,7 +101015,7 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64.p0")] fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); } _vst1q_s64_x3(a, b.0, b.1, b.2) @@ -101225,7 +101033,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64.p0")] fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); } let mut b: int64x2x3_t = b; @@ -101612,7 +101420,7 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8.p0")] fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); } _vst1_s8_x4(a, b.0, b.1, b.2, b.3) @@ -101630,7 +101438,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8.p0")] fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); } let mut b: int8x8x4_t = b; @@ -101653,7 +101461,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8.p0")] fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); } _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) @@ -101671,7 +101479,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8.p0")] fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); } let mut b: int8x16x4_t = b; @@ -101710,7 +101518,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16.p0")] fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); } _vst1_s16_x4(a, b.0, b.1, b.2, b.3) @@ -101728,7 +101536,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16.p0")] fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); } let mut b: int16x4x4_t = b; @@ -101751,7 +101559,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16.p0")] fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); } _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) @@ -101769,7 +101577,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16.p0")] fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); } let mut b: int16x8x4_t = b; @@ -101792,7 +101600,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32.p0")] fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); } _vst1_s32_x4(a, b.0, b.1, b.2, b.3) @@ -101810,7 +101618,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32.p0")] fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); } let mut b: int32x2x4_t = b; @@ -101833,7 +101641,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32.p0")] fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); } _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) @@ -101851,7 +101659,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32.p0")] fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); } let mut b: int32x4x4_t = b; @@ -101873,7 +101681,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64.p0")] fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); } _vst1_s64_x4(a, b.0, b.1, b.2, b.3) @@ -101891,7 +101699,7 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64.p0")] fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); } _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) @@ -101909,7 +101717,7 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64.p0")] fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); } let mut b: int64x2x4_t = b; @@ -103858,7 +103666,7 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v1i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v1i64.p0")] fn _vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); } _vst1_v1i64(addr, val, align) @@ -103877,7 +103685,7 @@ unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); } _vst1_v2f32(addr, val, align) @@ -103896,7 +103704,7 @@ unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); } let val: float32x2_t = simd_shuffle!(val, val, [0, 1]); @@ -103916,7 +103724,7 @@ unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); } _vst1_v2i32(addr, val, align) @@ -103935,7 +103743,7 @@ unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); } let val: int32x2_t = simd_shuffle!(val, val, [0, 1]); @@ -103955,7 +103763,7 @@ unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); } _vst1_v4i16(addr, val, align) @@ -103974,7 +103782,7 @@ unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); } let val: int16x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); @@ -103994,7 +103802,7 @@ unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); } _vst1_v8i8(addr, val, align) @@ -104013,7 +103821,7 @@ unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); } let val: int8x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -104033,7 +103841,7 @@ unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); } _vst1q_v16i8(addr, val, align) @@ -104052,7 +103860,7 @@ unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); } let val: int8x16_t = simd_shuffle!( @@ -104076,7 +103884,7 @@ unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); } _vst1q_v2i64(addr, val, align) @@ -104095,7 +103903,7 @@ unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v2i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); } let val: int64x2_t = simd_shuffle!(val, val, [0, 1]); @@ -104115,7 +103923,7 @@ unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); } _vst1q_v4f32(addr, val, align) @@ -104134,7 +103942,7 @@ unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); } let val: float32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); @@ -104154,7 +103962,7 @@ unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); } _vst1q_v4i32(addr, val, align) @@ -104173,7 +103981,7 @@ unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); } let val: int32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); @@ -104193,7 +104001,7 @@ unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); } _vst1q_v8i16(addr, val, align) @@ -104212,7 +104020,7 @@ unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.p0i8.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); } let val: int16x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -104654,7 +104462,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")] fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); } _vst2_f32(a as _, b.0, b.1, 4) @@ -104672,7 +104480,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")] fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); } let mut b: float32x2x2_t = b; @@ -104693,7 +104501,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")] fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); } _vst2q_f32(a as _, b.0, b.1, 4) @@ -104711,7 +104519,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")] fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); } let mut b: float32x4x2_t = b; @@ -104732,7 +104540,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")] fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); } _vst2_s8(a as _, b.0, b.1, 1) @@ -104750,7 +104558,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")] fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); } let mut b: int8x8x2_t = b; @@ -104771,7 +104579,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")] fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); } _vst2q_s8(a as _, b.0, b.1, 1) @@ -104789,7 +104597,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v16i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")] fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); } let mut b: int8x16x2_t = b; @@ -104818,7 +104626,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); } _vst2_s16(a as _, b.0, b.1, 2) @@ -104836,7 +104644,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); } let mut b: int16x4x2_t = b; @@ -104857,7 +104665,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); } _vst2q_s16(a as _, b.0, b.1, 2) @@ -104875,7 +104683,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); } let mut b: int16x8x2_t = b; @@ -104896,7 +104704,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); } _vst2_s32(a as _, b.0, b.1, 4) @@ -104914,7 +104722,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); } let mut b: int32x2x2_t = b; @@ -104935,7 +104743,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } _vst2q_s32(a as _, b.0, b.1, 4) @@ -104953,7 +104761,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } let mut b: int32x4x2_t = b; @@ -105319,7 +105127,7 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) @@ -105339,7 +105147,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } let mut b: float32x2x2_t = b; @@ -105362,7 +105170,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); } _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) @@ -105382,7 +105190,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); } let mut b: float32x4x2_t = b; @@ -105405,7 +105213,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); } _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) @@ -105425,7 +105233,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); } let mut b: int8x8x2_t = b; @@ -105448,7 +105256,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); } _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) @@ -105468,7 +105276,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); } let mut b: int16x4x2_t = b; @@ -105491,7 +105299,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); } _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) @@ -105511,7 +105319,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); } let mut b: int16x8x2_t = b; @@ -105534,7 +105342,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); } _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) @@ -105554,7 +105362,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); } let mut b: int32x2x2_t = b; @@ -105577,7 +105385,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); } _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) @@ -105597,7 +105405,7 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); } let mut b: int32x4x2_t = b; @@ -106097,7 +105905,7 @@ pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v1i64")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v1i64.p0")] fn _vst2_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32); } _vst2_s64(a as _, b.0, b.1, 8) diff --git a/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs b/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs index 04ebe8ab3b..cfb1a2843a 100644 --- a/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs +++ b/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs @@ -50,8 +50,8 @@ test_vsli!(test_vsli_n_p8, i8 => vsli_n_p8([3, 44, 127, 56, 0, 24, 97, 10], [127 test_vsli!(test_vsliq_n_p8, i8 => vsliq_n_p8([3, 44, 127, 56, 0, 24, 97, 10, 33, 1, 6, 39, 15, 101, 80, 1], [127, 14, 125, 77, 27, 8, 1, 110, 4, 92, 111, 32, 1, 4, 29, 99], 2)); test_vsli!(test_vsli_n_p16, i16 => vsli_n_p16([3304, 44, 2300, 546], [1208, 140, 1225, 707], 7)); test_vsli!(test_vsliq_n_p16, i16 => vsliq_n_p16([3304, 44, 2300, 20046, 0, 9924, 907, 1190], [1208, 140, 4225, 707, 2701, 804, 71, 2110], 14)); -test_vsli!(test_vsli_n_p64, i64 => vsli_n_p64([333333], [1028], 45)); -test_vsli!(test_vsliq_n_p64, i64 => vsliq_n_p64([333333, 52023], [1028, 99814], 33)); +//test_vsli!(test_vsli_n_p64, i64 => vsli_n_p64([333333], [1028], 45)); +//test_vsli!(test_vsliq_n_p64, i64 => vsliq_n_p64([333333, 52023], [1028, 99814], 33)); macro_rules! test_vsri { ($test_id:ident, $t:ty => $fn_id:ident ([$($a:expr),*], [$($b:expr),*], $n:expr)) => { @@ -89,5 +89,5 @@ test_vsri!(test_vsri_n_p8, i8 => vsri_n_p8([3, 44, 127, 56, 0, 24, 97, 10], [127 test_vsri!(test_vsriq_n_p8, i8 => vsriq_n_p8([3, 44, 127, 56, 0, 24, 97, 10, 33, 1, 6, 39, 15, 101, 80, 1], [127, 14, 125, 77, 27, 8, 1, 110, 4, 92, 111, 32, 1, 4, 29, 99], 2)); test_vsri!(test_vsri_n_p16, i16 => vsri_n_p16([3304, 44, 2300, 546], [1208, 140, 1225, 707], 7)); test_vsri!(test_vsriq_n_p16, i16 => vsriq_n_p16([3304, 44, 2300, 20046, 0, 9924, 907, 1190], [1208, 140, 4225, 707, 2701, 804, 71, 2110], 14)); -test_vsri!(test_vsri_n_p64, i64 => vsri_n_p64([333333], [1028], 45)); -test_vsri!(test_vsriq_n_p64, i64 => vsriq_n_p64([333333, 52023], [1028, 99814], 33)); +//test_vsri!(test_vsri_n_p64, i64 => vsri_n_p64([333333], [1028], 45)); +//test_vsri!(test_vsriq_n_p64, i64 => vsriq_n_p64([333333, 52023], [1028, 99814], 33)); diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 0967476950..99a3aed8d6 100644 --- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -1844,7 +1844,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uqsub.v{neon_type[0].lane}{type[2]}" arch: aarch64,arm64ec - - link: "llvm.usub.sat.{neon_type[0].lane}{type[2]}" + - link: "llvm.usub.sat.v{neon_type[0].lane}{type[2]}" arch: arm - name: "vqsub{neon_type[0].no}" @@ -1874,7 +1874,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.sqsub.v{neon_type[0].lane}{type[2]}" arch: aarch64,arm64ec - - link: "llvm.ssub.sat.{neon_type[0].lane}{type[2]}" + - link: "llvm.ssub.sat.v{neon_type[0].lane}{type[2]}" arch: arm - name: "vhadd{neon_type.no}" @@ -2172,9 +2172,9 @@ intrinsics: - LLVMLink: name: "vld1x{neon_type[1].tuple}.{neon_type[1]}" links: - - link: "llvm.aarch64.neon.ld1x{neon_type[1].tuple}.v{neon_type[1].lane}f{neon_type[1].base}.p0f{neon_type[1].base}" + - link: "llvm.aarch64.neon.ld1x{neon_type[1].tuple}.v{neon_type[1].lane}f{neon_type[1].base}.p0" arch: aarch64,arm64ec - - link: "llvm.arm.neon.vld1x{neon_type[1].tuple}.v{neon_type[1].lane}f{neon_type[1].base}.p0f{neon_type[1].base}" + - link: "llvm.arm.neon.vld1x{neon_type[1].tuple}.v{neon_type[1].lane}f{neon_type[1].base}.p0" arch: arm - name: "vld1{neon_type[1].no}" @@ -2218,9 +2218,9 @@ intrinsics: - LLVMLink: name: "ld1x{neon_type[1].tuple}.{neon_type[1]}" links: - - link: "llvm.aarch64.neon.ld1x{neon_type[1].tuple}.v{neon_type[1].lane}i{neon_type[1].base}.p0i{neon_type[1].base}" + - link: "llvm.aarch64.neon.ld1x{neon_type[1].tuple}.v{neon_type[1].lane}i{neon_type[1].base}.p0" arch: aarch64,arm64ec - - link: "llvm.arm.neon.vld1x{neon_type[1].tuple}.v{neon_type[1].lane}i{neon_type[1].base}.p0i{neon_type[1].base}" + - link: "llvm.arm.neon.vld1x{neon_type[1].tuple}.v{neon_type[1].lane}i{neon_type[1].base}.p0" arch: arm - name: "vld1{neon_type[1].no}" @@ -2360,7 +2360,7 @@ intrinsics: - "ptr: *const i8" - "size: i32" links: - - link: "llvm.arm.neon.vld2.v{neon_type[1].lane}{type[2]}.p0i8" + - link: "llvm.arm.neon.vld2.v{neon_type[1].lane}{type[2]}" arch: arm - FnCall: - "_vld2{neon_type[1].nox}" @@ -2387,7 +2387,7 @@ intrinsics: - "ptr: *const i8" - "size: i32" links: - - link: "llvm.arm.neon.vld2.v{neon_type[1].lane}{type[2]}.p0i8" + - link: "llvm.arm.neon.vld2.v{neon_type[1].lane}{type[2]}" arch: arm - FnCall: - "_vld2{neon_type[1].nox}" @@ -2419,7 +2419,7 @@ intrinsics: arguments: - "ptr: *const {neon_type[3]}" links: - - link: "llvm.aarch64.neon.ld2.v{neon_type[1].lane}{type[2]}.p0v{neon_type[1].lane}{type[2]}" + - link: "llvm.aarch64.neon.ld2.v{neon_type[1].lane}{type[2]}.p0" arch: aarch64,arm64ec - FnCall: - "_vld2{neon_type[1].nox}" @@ -2443,7 +2443,7 @@ intrinsics: arguments: - "ptr: *const {neon_type[3]}" links: - - link: "llvm.aarch64.neon.ld2.v{neon_type[1].lane}{type[2]}.p0v{neon_type[1].lane}{type[2]}" + - link: "llvm.aarch64.neon.ld2.v{neon_type[1].lane}{type[2]}.p0" arch: aarch64,arm64ec - FnCall: - "_vld2{neon_type[1].nox}" @@ -2562,7 +2562,7 @@ intrinsics: - "n: i32" - "size: i32" links: - - link: "llvm.arm.neon.vld2lane.v{neon_type[1].lane}{type[2]}.p0i8" + - link: "llvm.arm.neon.vld2lane.v{neon_type[1].lane}{type[2]}.p0" arch: arm - FnCall: - "_vld2_lane{neon_type[1].nox}" @@ -2648,7 +2648,7 @@ intrinsics: - "n: i64" - "ptr: *const i8" links: - - link: "llvm.aarch64.neon.ld2lane.v{neon_type[1].lane}{type[2]}.p0i8" + - link: "llvm.aarch64.neon.ld2lane.v{neon_type[1].lane}{type[2]}.p0" arch: aarch64,arm64ec - FnCall: - "_vld2{neon_type[1].lane_nox}" @@ -2696,7 +2696,7 @@ intrinsics: - "n: i32" - "size: i32" links: - - link: "llvm.arm.neon.vld2lane.v{neon_type[1].lane}{type[2]}.p0i8" + - link: "llvm.arm.neon.vld2lane.v{neon_type[1].lane}{type[2]}.p0" arch: arm - FnCall: - "_vld2{neon_type[1].lane_nox}" @@ -2726,7 +2726,7 @@ intrinsics: - "ptr: *const i8" - "size: i32" links: - - link: "llvm.arm.neon.vld2dup.v{neon_type[1].lane}{type[2]}.p0i8" + - link: "llvm.arm.neon.vld2dup.v{neon_type[1].lane}{type[2]}.p0" arch: arm - FnCall: - "_vld2{neon_type[1].dup_nox}" @@ -2751,7 +2751,7 @@ intrinsics: arguments: - "ptr: *const i64" links: - - link: "llvm.aarch64.neon.ld2r.v{neon_type[1].lane}{type[2]}.p0i64" + - link: "llvm.aarch64.neon.ld2r.v{neon_type[1].lane}{type[2]}.p0" arch: aarch64,arm64ec - FnCall: - "_vld2{neon_type[1].dup_nox}" @@ -2784,7 +2784,7 @@ intrinsics: - "ptr: *const i8" - "size: i32" links: - - link: "llvm.arm.neon.vld2dup.v{neon_type[1].lane}{type[2]}.p0i8" + - link: "llvm.arm.neon.vld2dup.v{neon_type[1].lane}{type[2]}.p0" arch: arm - FnCall: - "_vld2{neon_type[1].dup_nox}" @@ -2899,7 +2899,7 @@ intrinsics: arguments: - "ptr: {type[0]}" links: - - link: "llvm.aarch64.neon.ld2r.v{neon_type[1].lane}{type[2]}.p0{type[2]}" + - link: "llvm.aarch64.neon.ld2r.v{neon_type[1].lane}{type[2]}.p0" arch: aarch64,arm64ec - FnCall: - "_vld2{neon_type[1].dup_nox}" @@ -2935,7 +2935,7 @@ intrinsics: - 'n: i64' - 'ptr: *const i8' links: - - link: 'llvm.aarch64.neon.ld3lane.v{neon_type[1].lane}{type[3]}.p0i8' + - link: 'llvm.aarch64.neon.ld3lane.v{neon_type[1].lane}{type[3]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld3{neon_type[1].lane_nox}', ['b.0', 'b.1', 'b.2', 'LANE as i64', 'a as _']] @@ -2964,7 +2964,7 @@ intrinsics: arguments: - 'ptr: {type[2]}' links: - - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0v{neon_type[1].lane}{type[3]}' + - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld3{neon_type[1].nox}', ['a as _']] @@ -2986,7 +2986,7 @@ intrinsics: arguments: - 'ptr: {type[2]}' links: - - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0v{neon_type[1].lane}{type[3]}' + - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld3{neon_type[1].nox}', ['a as _']] @@ -3017,7 +3017,7 @@ intrinsics: - 'ptr: *const i8' - 'size: i32' links: - - link: 'llvm.arm.neon.vld3.v{neon_type[1].lane}{type[2]}.p0i8' + - link: 'llvm.arm.neon.vld3.v{neon_type[1].lane}{type[2]}.p0' arch: arm - FnCall: ['_vld3{neon_type[1].nox}', ['a as *const i8', '{neon_type[1].base_byte_size}']] @@ -3041,7 +3041,7 @@ intrinsics: - 'ptr: *const i8' - 'size: i32' links: - - link: 'llvm.arm.neon.vld3.v{neon_type[1].lane}{type[2]}.p0i8' + - link: 'llvm.arm.neon.vld3.v{neon_type[1].lane}{type[2]}.p0' arch: arm - FnCall: ['_vld3{neon_type[1].nox}', ['a as *const i8', '{neon_type[1].base_byte_size}']] @@ -3071,7 +3071,7 @@ intrinsics: - 'n: i64' - 'ptr: *const i8' links: - - link: 'llvm.aarch64.neon.ld3lane.v{neon_type[1].lane}{type[3]}.p0i8' + - link: 'llvm.aarch64.neon.ld3lane.v{neon_type[1].lane}{type[3]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld3{neon_type[1].lane_nox}', ['b.0', 'b.1', 'b.2', 'LANE as i64', 'a as _']] @@ -3102,7 +3102,7 @@ intrinsics: - 'n: i32' - 'size: i32' links: - - link: 'llvm.arm.neon.vld3lane.v{neon_type[1].lane}{type[3]}.p0i8' + - link: 'llvm.arm.neon.vld3lane.v{neon_type[1].lane}{type[3]}.p0' arch: arm - FnCall: ['_vld3{neon_type[1].lane_nox}', ['a as _', 'b.0', 'b.1', 'b.2', 'LANE', '{type[5]}']] @@ -3137,7 +3137,7 @@ intrinsics: - 'n: i32' - 'size: i32' links: - - link: 'llvm.arm.neon.vld3lane.v{neon_type[1].lane}{type[3]}.p0i8' + - link: 'llvm.arm.neon.vld3lane.v{neon_type[1].lane}{type[3]}.p0' arch: arm - FnCall: ['_vld3{neon_type[1].lane_nox}', ['a as _', 'b.0', 'b.1', 'b.2', 'LANE', '{type[5]}']] @@ -3168,7 +3168,7 @@ intrinsics: - 'n: i32' - 'size: i32' links: - - link: 'llvm.arm.neon.vld3lane.v{neon_type[1].lane}{type[3]}.p0i8' + - link: 'llvm.arm.neon.vld3lane.v{neon_type[1].lane}{type[3]}.p0' arch: arm - FnCall: ['_vld3{neon_type[1].lane_nox}', ['a as _', 'b.0', 'b.1', 'b.2', 'LANE', '{type[5]}']] @@ -3303,7 +3303,7 @@ intrinsics: arguments: - 'ptr: {type[0]}' links: - - link: 'llvm.aarch64.neon.ld3r.v{neon_type[1].lane}{type[2]}.p0{type[2]}' + - link: 'llvm.aarch64.neon.ld3r.v{neon_type[1].lane}{type[2]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld3{neon_type[1].dup_nox}', ['a as _']] @@ -3331,7 +3331,7 @@ intrinsics: - 'ptr: *const i8' - 'size: i32' links: - - link: 'llvm.arm.neon.vld3dup.v{neon_type[1].lane}{type[2]}.p0i8' + - link: 'llvm.arm.neon.vld3dup.v{neon_type[1].lane}{type[2]}.p0' arch: arm - FnCall: ['_vld3{neon_type[1].dup_nox}', ['a as *const i8', '{type[3]}']] @@ -3384,7 +3384,7 @@ intrinsics: - 'ptr: *const i8' - 'size: i32' links: - - link: 'llvm.arm.neon.vld3dup.v{neon_type[1].lane}{type[2]}.p0i8' + - link: 'llvm.arm.neon.vld3dup.v{neon_type[1].lane}{type[2]}.p0' arch: arm - FnCall: ['_vld3{neon_type[1].dup_nox}', ['a as *const i8', '{type[3]}']] @@ -3460,7 +3460,7 @@ intrinsics: arguments: - 'ptr: {type[3]}' links: - - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0v{neon_type[1].lane}{type[2]}' + - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld4{neon_type[1].nox}', ['a as _']] @@ -3480,7 +3480,7 @@ intrinsics: arguments: - 'ptr: {type[3]}' links: - - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0v{neon_type[1].lane}{type[2]}' + - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld4{neon_type[1].nox}', ['a as _']] @@ -3517,7 +3517,7 @@ intrinsics: - 'n: i64' - 'ptr: *const i8' links: - - link: 'llvm.aarch64.neon.ld4lane.v{neon_type[1].lane}{type[3]}.p0i8' + - link: 'llvm.aarch64.neon.ld4lane.v{neon_type[1].lane}{type[3]}.p0' arch: aarch64,arm64ec - FnCall: ['_vld4{neon_type[1].lane_nox}', ['b.0', 'b.1', 'b.2', 'b.3', 'LANE as i64', 'a as _']] @@ -3548,7 +3548,7 @@ intrinsics: - 'ptr: *const i8' - 'size: i32' links: - - link: 'llvm.arm.neon.vld4.v{neon_type[1].lane}{type[2]}.p0i8' + - link: 'llvm.arm.neon.vld4.v{neon_type[1].lane}{type[2]}.p0' arch: arm - FnCall: ['_vld4{neon_type[1].nox}', ['a as *const i8', '{type[3]}']] @@ -3572,7 +3572,7 @@ intrinsics: - 'ptr: *const i8' - 'size: i32' links: - - link: 'llvm.arm.neon.vld4.v{neon_type[1].lane}{type[2]}.p0i8' + - link: 'llvm.arm.neon.vld4.v{neon_type[1].lane}{type[2]}.p0' arch: arm - FnCall: ['_vld4{neon_type[1].nox}', ['a as *const i8', '{type[3]}']] @@ -3691,7 +3691,7 @@ intrinsics: - 'n: i32' - 'size: i32' links: - - link: 'llvm.arm.neon.vld4lane.v{neon_type[1].lane}{type[3]}.p0i8' + - link: 'llvm.arm.neon.vld4lane.v{neon_type[1].lane}{type[3]}.p0' arch: arm - FnCall: ['_vld4{neon_type[1].lane_nox}', ['a as _', 'b.0', 'b.1', 'b.2', 'b.3', LANE, '{type[4]}']] @@ -3961,7 +3961,7 @@ intrinsics: - 'a: {type[2]}' - 'b: {type[2]}' links: - - link: 'llvm.arm.neon.vst1x2.p0{type[0]}.v{neon_type[1].lane}{type[0]}' + - link: 'llvm.arm.neon.vst1x2.v{neon_type[1].lane}{type[0]}.p0' arch: arm - FnCall: ['_vst1{neon_type[1].no}', ['a', 'b.0', 'b.1']] @@ -3993,7 +3993,7 @@ intrinsics: - 'b: {type[2]}' - 'c: {type[2]}' links: - - link: 'llvm.arm.neon.vst1x3.p0{type[0]}.v{neon_type[1].lane}{type[0]}' + - link: 'llvm.arm.neon.vst1x3.p0{type[0]}.v{neon_type[1].lane}{type[0]}.p0' arch: arm - FnCall: ['_vst1{neon_type[1].no}', ['a', 'b.0', 'b.1', 'b.2']] @@ -4026,7 +4026,7 @@ intrinsics: - 'c: {type[2]}' - 'd: {type[2]}' links: - - link: 'llvm.arm.neon.vst1x4.p0{type[0]}.v{neon_type[1].lane}{type[0]}' + - link: 'llvm.arm.neon.vst1x4.p0{type[0]}.v{neon_type[1].lane}{type[0]}.p0' arch: arm - FnCall: ['_vst1{neon_type[1].no}', ['a', 'b.0', 'b.1', 'b.2', 'b.3']] @@ -4053,7 +4053,7 @@ intrinsics: - 'c: {type[2]}' - 'd: {type[2]}' links: - - link: 'llvm.arm.neon.vst1x4.p0{type[0]}.v{neon_type[1].lane}{type[0]}' + - link: 'llvm.arm.neon.vst1x4.p0{type[0]}.v{neon_type[1].lane}{type[0]}.p0' arch: arm - FnCall: ['_vst1{neon_type[1].no}', ['a', 'b.0', 'b.1', 'b.2', 'b.3']] @@ -4098,7 +4098,7 @@ intrinsics: - 'b: {type[2]}' - 'size: i32' links: - - link: 'llvm.arm.neon.vst2.p0i8.v{neon_type[1].lane}{type[0]}' + - link: 'llvm.arm.neon.vst2.v{neon_type[1].lane}{type[0]}.p0' arch: arm - FnCall: ['_vst2{neon_type[1].nox}', ['a as _', 'b.0', 'b.1', '8']] @@ -4292,7 +4292,7 @@ intrinsics: - 'b: {type[2]}' - 'size: i32' links: - - link: 'llvm.arm.neon.vst2.p0i8.v{neon_type[1].lane}{type[0]}' + - link: 'llvm.arm.neon.vst2.v{neon_type[1].lane}{type[0]}.p0' arch: arm - FnCall: ['_vst2{neon_type[1].nox}', ['a as _', 'b.0', 'b.1', "{type[3]}"]] @@ -4327,7 +4327,7 @@ intrinsics: - 'n: i32' - 'size: i32' links: - - link: 'llvm.arm.neon.vst2lane.p0i8.v{neon_type[1].lane}{type[0]}' + - link: 'llvm.arm.neon.vst2lane.v{neon_type[1].lane}{type[0]}.p0' arch: arm - FnCall: ['_vst2{neon_type[1].lane_nox}', ['a as _', 'b.0', 'b.1', 'LANE', "{type[4]}"]] @@ -5147,14 +5147,14 @@ intrinsics: safety: unsafe: [neon] types: - - ["p8", poly8x8_t, poly16x8_t, int8x8_t] + - ["p8", poly8x8_t, poly16x8_t] compose: - LLVMLink: name: "pmull.{neon_type[1].no}" links: - - link: "llvm.aarch64.neon.pmull.{neon_type[3]}" + - link: "llvm.aarch64.neon.pmull.v8i16" arch: aarch64,arm64ec - - link: "llvm.arm.neon.vmullp.{neon_type[3]}" + - link: "llvm.arm.neon.vmullp.v8i16" arch: arm - name: "vmull_n{neon_type[0].no}" @@ -8426,7 +8426,7 @@ intrinsics: - "a: {neon_type[2]}" - "b: {neon_type[2]}" links: - - link: "llvm.arm.neon.vst1x{neon_type[1].tuple}.p0f32.{neon_type[2]}" + - link: "llvm.arm.neon.vst1x{neon_type[1].tuple}.{neon_type[2]}.p0" arch: arm - FnCall: ["_vst1{neon_type[1].no}", ['a', 'b.0', 'b.1']] @@ -8452,7 +8452,7 @@ intrinsics: - "b: {neon_type[2]}" - "c: {neon_type[2]}" links: - - link: "llvm.arm.neon.vst1x{neon_type[1].tuple}.p0f32.{neon_type[2]}" + - link: "llvm.arm.neon.vst1x{neon_type[1].tuple}.{neon_type[2]}.p0" arch: arm - FnCall: ["_vst1{neon_type[1].no}", ['a', 'b.0', 'b.1', 'b.2']] @@ -9668,7 +9668,7 @@ intrinsics: - "ptr: *const i8" - "size: i32" links: - - link: "llvm.arm.neon.vld4dup.{neon_type[2]}.p0i8" + - link: "llvm.arm.neon.vld4dup.{neon_type[2]}.p0" arch: arm - FnCall: ["_vld4{neon_type[1].dup_nox}", ['a as *const i8', "{type[3]}"]] @@ -9698,7 +9698,7 @@ intrinsics: arguments: - "ptr: {type[0]}" links: - - link: "llvm.aarch64.neon.ld4r.{neon_type[2]}.{type[3]}" + - link: "llvm.aarch64.neon.ld4r.{neon_type[2]}.{type[3]}.p0" arch: aarch64,arm64ec - FnCall: ["_vld4{neon_type[1].dup_nox}", ['a as _']] @@ -9722,7 +9722,7 @@ intrinsics: - "ptr: *const i8" - "size: i32" links: - - link: "llvm.arm.neon.vld4dup.v1i64.p0i8" + - link: "llvm.arm.neon.vld4dup.v1i64.p0" arch: arm - FnCall: ["_vld4{neon_type[1].dup_nox}", ['a as *const i8', '8']] @@ -9799,8 +9799,9 @@ intrinsics: arguments: ["a: {type[1]}", "b: {type[2]}"] return_type: "{neon_type[3]}" attr: - - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] + - FnCall: [cfg, ['target_arch = "arm"']] + - FnCall: [target_feature, ['enable = "neon,v7"']] + # - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld1]]}]] - FnCall: [unstable, ['feature = "stdarch_arm_neon_intrinsics"', 'issue = "111800"']] safety: unsafe: [neon] @@ -9830,19 +9831,19 @@ intrinsics: safety: unsafe: [neon] attr: - - *target-is-arm - - *neon-v7 + - FnCall: [cfg, ['target_arch = "arm"']] + - FnCall: [target_feature, ['enable = "neon,v7"']] - *neon-unstable - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vld1.{type[2]}"']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] types: - - ['*const i8', int8x8_t, '8', 'crate::mem::align_of::() as i32', '_v8i8'] - - ['*const i8', int8x16_t, '8', 'crate::mem::align_of::() as i32', 'q_v16i8'] - - ['*const i16', int16x4_t, '16', 'crate::mem::align_of::() as i32', '_v4i16'] - - ['*const i16', int16x8_t, '16', 'crate::mem::align_of::() as i32', 'q_v8i16'] - - ['*const i32', int32x2_t, '32', 'crate::mem::align_of::() as i32', '_v2i32'] - - ['*const i32', int32x4_t, '32', 'crate::mem::align_of::() as i32', 'q_v4i32'] - - ['*const i64', int64x1_t, '64', 'crate::mem::align_of::() as i32', '_v1i64'] - - ['*const i64', int64x2_t, '64', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*const i8', int8x8_t, '"vld1.8"', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*const i8', int8x16_t, '"vld1.8"', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*const i16', int16x4_t, '"vld1.16"', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*const i16', int16x8_t, '"vld1.16"', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*const i32', int32x2_t, 'vldr', 'crate::mem::align_of::() as i32', '_v2i32'] + - ['*const i32', int32x4_t, '"vld1.32"', 'crate::mem::align_of::() as i32', 'q_v4i32'] + - ['*const i64', int64x1_t, 'vldr', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*const i64', int64x2_t, '"vld1.64"', 'crate::mem::align_of::() as i32', 'q_v2i64'] compose: - FnCall: - "vld1{type[4]}" @@ -9856,34 +9857,52 @@ intrinsics: safety: unsafe: [neon] attr: - - *target-is-arm + - FnCall: [cfg, ['target_arch = "arm"']] - FnCall: [target_feature, ['enable = "{type[3]}"']] - *neon-unstable - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vld1.{type[2]}"']]}]] - types: - - ['*const u8', uint8x8_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', '_v8i8'] - - ['*const u8', uint8x16_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v16i8'] - - ['*const u16', uint16x4_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', '_v4i16'] - - ['*const u16', uint16x8_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v8i16'] - - ['*const u32', uint32x2_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', '_v2i32'] - - ['*const u32', uint32x4_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v4i32'] - - ['*const u64', uint64x1_t, '64', 'neon,v7', 'crate::mem::align_of::() as i32', '_v1i64'] - - ['*const u64', uint64x2_t, '64', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v2i64'] - - ['*const p8', poly8x8_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', '_v8i8'] - - ['*const p8', poly8x16_t, '8', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v16i8'] - - ['*const p16', poly16x4_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', '_v4i16'] - - ['*const p16', poly16x8_t, '16', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v8i16'] - - ['*const p64', poly64x1_t, '64', 'neon,aes', 'crate::mem::align_of::() as i32', '_v1i64'] - - ['*const p64', poly64x2_t, '64', 'neon,aes', 'crate::mem::align_of::() as i32', 'q_v2i64'] - - ['*const f32', float32x2_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', '_v2f32'] - - ['*const f32', float32x4_t, '32', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v4f32'] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] + types: + - ['*const u8', uint8x8_t, '"vld1.8"', 'neon,v7', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*const u8', uint8x16_t, '"vld1.8"', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*const u16', uint16x4_t, '"vld1.16"', 'neon,v7', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*const u16', uint16x8_t, '"vld1.16"', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*const u32', uint32x2_t, 'vldr', 'neon,v7', 'crate::mem::align_of::() as i32', '_v2i32'] + - ['*const u32', uint32x4_t, '"vld1.32"', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v4i32'] + - ['*const u64', uint64x1_t, 'vldr', 'neon,v7', 'crate::mem::align_of::() as i32', '_v1i64'] + - ['*const u64', uint64x2_t, '"vld1.64"', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*const p8', poly8x8_t, '"vld1.8"', 'neon,v7', 'crate::mem::align_of::() as i32', '_v8i8'] + - ['*const p8', poly8x16_t, '"vld1.8"', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v16i8'] + - ['*const p16', poly16x4_t, '"vld1.16"', 'neon,v7', 'crate::mem::align_of::() as i32', '_v4i16'] + - ['*const p16', poly16x8_t, '"vld1.16"', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v8i16'] + - ['*const p64', poly64x2_t, '"vld1.64"', 'neon,aes', 'crate::mem::align_of::() as i32', 'q_v2i64'] + - ['*const f32', float32x2_t, 'vldr', 'neon,v7', 'crate::mem::align_of::() as i32', '_v2f32'] + - ['*const f32', float32x4_t, '"vld1.32"', 'neon,v7', 'crate::mem::align_of::() as i32', 'q_v4f32'] + compose: + - FnCall: + - transmute + - - FnCall: + - "vld1{type[5]}" + - - 'ptr as *const i8' + - '{type[4]}' + + - name: "vld1{neon_type[1].no}" + doc: "Load multiple single-element structures to one, two, three, or four registers." + arguments: ["ptr: {type[0]}"] + return_type: "{neon_type[1]}" + safety: + unsafe: [neon] + attr: + - FnCall: [cfg, ['target_arch = "arm"']] + - FnCall: [target_feature, ['enable = "neon,aes"']] + - *neon-unstable + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['vldr']]}]] + types: + - ['*const p64', poly64x1_t] compose: - - FnCall: - - transmute - - - FnCall: - - "vld1{type[5]}" - - - 'ptr as *const i8' - - '{type[4]}' + # Inlining seems broken for 'fn vld1_v1i64', this "fixes" it + - Let: [a, '*const i8', 'ptr as *const i8'] + - Let: [b, i32, 'crate::mem::align_of::() as i32'] + - 'extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; }} transmute(_vld1_v1i64(a, b))' - name: "vtbx1" visibility: private @@ -10006,7 +10025,6 @@ intrinsics: - FnCall: [transmute, ['b.1']] - FnCall: [transmute, [c]] - - name: "vtbx3" visibility: private doc: "Extended table look-up" @@ -10657,13 +10675,13 @@ intrinsics: types: - [u32, u64] compose: - - FnCall: - - __crc32w - - - FnCall: - - __crc32w - - - crc - - '(data & 0xFFFFFFFF) as u32' - - '(data >> 32) as u32' + # As the call to `__crc32` does not get inlined, we define an LLVM binding + # here, which is the same as above, and call it directly which results + # in the correct instructions being generated + - Let: [a, i32, 'crc as i32'] + - Let: [b, i32, '(data & 0xFFFFFFFF).as_signed() as i32'] + - Let: [c, i32, '(data >> 32).as_signed() as i32'] + - 'extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] fn ___crc32w(crc: i32, data: i32) -> i32;}} ___crc32w(___crc32w(a, b), c).as_unsigned()' - name: "__crc32cd" doc: "CRC32-C single round checksum for quad words (64 bits)." @@ -10679,13 +10697,10 @@ intrinsics: types: - [u32, u64] compose: - - FnCall: - - __crc32cw - - - FnCall: - - __crc32cw - - - crc - - '(data & 0xFFFFFFFF) as u32' - - '(data >> 32) as u32' + - Let: [a, i32, 'crc as i32'] + - Let: [b, i32, '(data & 0xFFFFFFFF).as_signed() as i32'] + - Let: [c, i32, '(data >> 32).as_signed() as i32'] + - 'extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] fn ___crc32cw(crc: i32, data: i32) -> i32;}} ___crc32cw(___crc32cw(a, b), c).as_unsigned() as u32' - name: "vabs{neon_type.no}" doc: "Absolute value (wrapping)." @@ -10746,7 +10761,7 @@ intrinsics: return_type: "{neon_type}" attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpminu]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uminp]]}]] - *neon-stable-not-arm - *neon-unstable-is-arm @@ -10771,7 +10786,7 @@ intrinsics: return_type: "{neon_type}" attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmins]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmin]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fminp]]}]] - *neon-stable-not-arm - *neon-unstable-is-arm @@ -10788,7 +10803,6 @@ intrinsics: - link: "llvm.arm.neon.vpmins.{neon_type}" arch: arm - - name: "vpmax{neon_type.no}" doc: "Folding maximum of adjacent pairs" arguments: ["a: {neon_type}", "b: {neon_type}"] @@ -10820,7 +10834,7 @@ intrinsics: return_type: "{neon_type}" attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmaxu]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [umaxp]]}]] - *neon-stable-not-arm - *neon-unstable-is-arm @@ -10845,7 +10859,7 @@ intrinsics: return_type: "{neon_type}" attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmaxs]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpmax]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmaxp]]}]] - *neon-stable-not-arm - *neon-unstable-is-arm @@ -10882,9 +10896,9 @@ intrinsics: - LLVMLink: name: "vraddhn{neon_type[0].noq}" links: - - link: "llvm.aarch64.neon.raddhn.{neon_type[0]}" + - link: "llvm.aarch64.neon.raddhn.{neon_type[1]}" arch: aarch64,arm64ec - - link: "llvm.arm.neon.vraddhn.{neon_type[0]}" + - link: "llvm.arm.neon.vraddhn.{neon_type[1]}" arch: arm - name: "vraddhn{neon_type[0].noq}" @@ -10986,7 +11000,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.addp.{neon_type}" arch: aarch64,arm64ec - - link: "llvm.arm.neon.vaddp.{neon_type}" + - link: "llvm.arm.neon.vpadd.{neon_type}" arch: arm - name: "vpadd{neon_type[0].no}" @@ -11023,15 +11037,15 @@ intrinsics: unsafe: [neon] attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [sadalp]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - *neon-unstable-is-arm types: - - [int16x4_t, int8x8_t] - - [int32x2_t, int16x4_t] - - [int64x1_t, int32x2_t] - - [int16x8_t, int8x16_t] - - [int32x4_t, int16x8_t] - - [int64x2_t, int32x4_t] + - [int16x4_t, int8x8_t, '"vpadal.s8"'] + - [int32x2_t, int16x4_t, '"vpadal.s16"'] + - [int64x1_t, int32x2_t, '"vpadal.s32"'] + - [int16x8_t, int8x16_t, '"vpadal.s8"'] + - [int32x4_t, int16x8_t, '"vpadal.s16"'] + - [int64x2_t, int32x4_t, '"vpadal.s32"'] compose: - LLVMLink: name: "vpadal{neon_type[1].no}" @@ -11049,15 +11063,15 @@ intrinsics: unsafe: [neon] attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [uadalp]]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - *neon-unstable-is-arm types: - - [uint16x4_t, uint8x8_t] - - [uint32x2_t, uint16x4_t] - - [uint64x1_t, uint32x2_t] - - [uint16x8_t, uint8x16_t] - - [uint32x4_t, uint16x8_t] - - [uint64x2_t, uint32x4_t] + - [uint16x4_t, uint8x8_t , '"vpadal.u8"'] + - [uint32x2_t, uint16x4_t, '"vpadal.u16"'] + - [uint64x1_t, uint32x2_t, '"vpadal.u32"'] + - [uint16x8_t, uint8x16_t, '"vpadal.u8"'] + - [uint32x4_t, uint16x8_t, '"vpadal.u16"'] + - [uint64x2_t, uint32x4_t, '"vpadal.u32"'] compose: - LLVMLink: name: "vpadal{neon_type[1].no}" @@ -11073,17 +11087,17 @@ intrinsics: unsafe: [neon] attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [saddlp]]}]] - *neon-stable-not-arm - *neon-unstable-is-arm types: - - [int8x8_t, int16x4_t , 'vpadal.s8'] - - [int16x4_t, int32x2_t, 'vpadal.s16'] - - [int32x2_t, int64x1_t, 'vpadal.s32'] - - [int8x16_t, int16x8_t, 'vpadal.s8'] - - [int16x8_t, int32x4_t, 'vpadal.s16'] - - [int32x4_t, int64x2_t, 'vpadal.s32'] + - [int8x8_t, int16x4_t , '"vpaddl.s8"'] + - [int16x4_t, int32x2_t, '"vpaddl.s16"'] + - [int32x2_t, int64x1_t, '"vpaddl.s32"'] + - [int8x16_t, int16x8_t, '"vpaddl.s8"'] + - [int16x8_t, int32x4_t, '"vpaddl.s16"'] + - [int32x4_t, int64x2_t, '"vpaddl.s32"'] compose: - LLVMLink: name: "vpaddl{neon_type[1].no}" @@ -11101,17 +11115,17 @@ intrinsics: unsafe: [neon] attr: - *neon-v7 - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"{type[2]}"']]}]] + - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uaddlp]]}]] - *neon-stable-not-arm - *neon-unstable-is-arm types: - - [uint8x8_t, uint16x4_t , 'vpadal.u8'] - - [uint16x4_t, uint32x2_t, 'vpadal.u16'] - - [uint32x2_t, uint64x1_t, 'vpadal.u32'] - - [uint8x16_t, uint16x8_t, 'vpadal.u8'] - - [uint16x8_t, uint32x4_t, 'vpadal.u16'] - - [uint32x4_t, uint64x2_t, 'vpadal.u32'] + - [uint8x8_t, uint16x4_t , '"vpaddl.u8"'] + - [uint16x4_t, uint32x2_t, '"vpaddl.u16"'] + - [uint32x2_t, uint64x1_t, '"vpaddl.u32"'] + - [uint8x16_t, uint16x8_t, '"vpaddl.u8"'] + - [uint16x8_t, uint32x4_t, '"vpaddl.u16"'] + - [uint32x4_t, uint64x2_t, '"vpaddl.u32"'] compose: - LLVMLink: name: "vpaddl{neon_type[1].no}" @@ -11557,7 +11571,7 @@ intrinsics: - LLVMLink: name: "_vst1{type[0]}" links: - - link: "llvm.arm.neon.vst1.p0i8.{neon_type[2]}" + - link: "llvm.arm.neon.vst1.{neon_type[2]}.p0" arch: arm - name: "vst1{neon_type[1].no}" @@ -11610,10 +11624,9 @@ intrinsics: safety: unsafe: [neon] attr: - - *target-is-arm - - *neon-v7 + #- *target-is-arm + #- *neon-v7 - *neon-unstable - - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vsri.{type[2]}"']]}]] types: - ['_v8i8', "int8x8_t", '8'] - ['_v16i8', 'int8x16_t', '8'] @@ -11644,20 +11657,21 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] static_defs: ['const N: i32'] types: - - [uint8x8_t, "neon,v7", '8', '1 <= N && N <= 8', 'v8i8', 'int8x8_t::splat', '-N as i8'] - - [uint8x16_t, "neon,v7", '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] - - [uint16x4_t, "neon,v7", '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] - - [uint16x8_t, "neon,v7", '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] - - [uint32x2_t, "neon,v7", '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N'] - - [uint32x4_t, "neon,v7", '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N'] - - [uint64x1_t, "neon,v7", '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] - - [uint64x2_t, "neon,v7", '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] - - [poly8x8_t, "neon,v7", '8', '1 <= N && N <= 8', 'v8i8', 'int8x8_t::splat', '-N as i8'] - - [poly8x16_t, "neon,v7", '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] - - [poly16x4_t, "neon,v7", '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] - - [poly16x8_t, "neon,v7", '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] - - [poly64x1_t, "neon,v7,aes", '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] - - [poly64x2_t, "neon,v7,aes", '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] + - [uint8x8_t, "neon,v7", '8', '1 <= N && N <= 8', 'v8i8', 'int8x8_t::splat', '-N as i8'] + - [uint8x16_t, "neon,v7", '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] + - [uint16x4_t, "neon,v7", '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] + - [uint16x8_t, "neon,v7", '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] + - [uint32x2_t, "neon,v7", '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N'] + - [uint32x4_t, "neon,v7", '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N'] + - [uint64x1_t, "neon,v7", '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] + - [uint64x2_t, "neon,v7", '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] + - [poly8x8_t, "neon,v7", '8', '1 <= N && N <= 8', 'v8i8', 'int8x8_t::splat', '-N as i8'] + - [poly8x16_t, "neon,v7", '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] + - [poly16x4_t, "neon,v7", '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] + - [poly16x8_t, "neon,v7", '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] + ## These live in ./crates/core_arch/src/arm/neon.rs + #- [poly64x1_t, "neon,v7,aes", '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] + #- [poly64x2_t, "neon,v7,aes", '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] compose: - FnCall: ["static_assert!", ['{type[3]}']] - FnCall: @@ -11712,20 +11726,21 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] static_defs: ['const N: i32'] types: - - [uint8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] - - [uint8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] - - [uint16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] - - [uint16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] - - [uint32x2_t, "neon,v7", '32', 'static_assert!', '0 >= N && N <= 31', 'v2i32', 'int32x2_t::splat', 'N as i32'] - - [uint32x4_t, "neon,v7", '32', 'static_assert!', '0 >= N && N <= 31', 'v4i32', 'int32x4_t::splat', 'N as i32'] - - [uint64x1_t, "neon,v7", '64', 'static_assert!', '0 >= N && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] - - [uint64x2_t, "neon,v7", '64', 'static_assert!', '0 >= N && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] - - [poly8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] - - [poly8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] - - [poly16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] - - [poly16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] - - [poly64x1_t, "neon,v7,aes", '64', 'static_assert!', '0 >= N && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] - - [poly64x2_t, "neon,v7,aes", '64', 'static_assert!', '0 >= N && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] + - [uint8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] + - [uint8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] + - [uint16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] + - [uint16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] + - [uint32x2_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v2i32', 'int32x2_t::splat', 'N as i32'] + - [uint32x4_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v4i32', 'int32x4_t::splat', 'N as i32'] + - [uint64x1_t, "neon,v7", '64', 'static_assert!', 'N >= 0 && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] + - [uint64x2_t, "neon,v7", '64', 'static_assert!', 'N >= 0 && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] + - [poly8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] + - [poly8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] + - [poly16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] + - [poly16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] + ## These live in ./crates/core_arch/src/arm/neon.rs + #- [poly64x1_t, "neon,v7,aes", '"vsli.64"', 'static_assert!', '0 <= N && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] + #- [poly64x2_t, "neon,v7,aes", '"vsli.64"', 'static_assert!', '0 <= N && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] compose: - FnCall: ["{type[3]}", ['{type[4]}']] - FnCall: @@ -11754,10 +11769,10 @@ intrinsics: - [int8x16_t, '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] - [int16x4_t, '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] - [int16x8_t, '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16'] - - [int32x2_t, '32', 'static_assert!', '0 >= N && N <= 31', 'v2i32', 'int32x2_t::splat', 'N'] - - [int32x4_t, '32', 'static_assert!', '0 >= N && N <= 31', 'v4i32', 'int32x4_t::splat', 'N'] - - [int64x1_t, '64', 'static_assert!', '0 >= N && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] - - [int64x2_t, '64', 'static_assert!', '0 >= N && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] + - [int32x2_t, '32', 'static_assert!', 'N >= 0 && N <= 31', 'v2i32', 'int32x2_t::splat', 'N'] + - [int32x4_t, '32', 'static_assert!', 'N >= 0 && N <= 31', 'v4i32', 'int32x4_t::splat', 'N'] + - [int64x1_t, '64', 'static_assert!', 'N >= 0 && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] + - [int64x2_t, '64', 'static_assert!', 'N >= 0 && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] compose: - FnCall: ["{type[2]}", ['{type[3]}']] - FnCall: From 3b49a07f70e39da1e2da497e67fd2b87ac2b2bf0 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 11 Feb 2025 15:36:05 +0000 Subject: [PATCH 08/13] fix test runner for armv7 --- crates/intrinsic-test/src/argument.rs | 4 +- crates/intrinsic-test/src/main.rs | 59 ++++++++++++++++++--------- crates/intrinsic-test/src/types.rs | 6 +-- 3 files changed, 44 insertions(+), 25 deletions(-) diff --git a/crates/intrinsic-test/src/argument.rs b/crates/intrinsic-test/src/argument.rs index 155e150d40..3011bbf4a3 100644 --- a/crates/intrinsic-test/src/argument.rs +++ b/crates/intrinsic-test/src/argument.rs @@ -226,7 +226,7 @@ impl ArgumentList { ty = arg.to_c_type(), name = arg.name, load = if arg.is_simd() { - arg.ty.get_load_function(target) + arg.ty.get_load_function(armv7_p64) } else { "*".to_string() }, @@ -258,7 +258,7 @@ impl ArgumentList { name = arg.name, vals_name = arg.rust_vals_array_name(), load = if arg.is_simd() { - arg.ty.get_load_function("__") + arg.ty.get_load_function(false) } else { "*".to_string() }, diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs index 9f6b6bcb28..d1881e3a94 100644 --- a/crates/intrinsic-test/src/main.rs +++ b/crates/intrinsic-test/src/main.rs @@ -201,10 +201,10 @@ fn main() {{ {passes} }} "#, - target_arch = if target.starts_with("aarch64") { - "aarch64" - } else { + target_arch = if target.contains("v7") { "arm" + } else { + "aarch64" }, arglists = intrinsic .arguments @@ -226,10 +226,10 @@ fn compile_c( cxx_toolchain_dir: Option<&str>, ) -> bool { let flags = std::env::var("CPPFLAGS").unwrap_or("".into()); - let arch_flags = if target.starts_with("aarch64") { - "-march=armv8.6-a+crypto+sha3+crc+dotprod" - } else { + let arch_flags = if target.contains("v7") { "-march=armv8.6-a+crypto+crc+dotprod" + } else { + "-march=armv8.6-a+crypto+sha3+crc+dotprod" }; let intrinsic_name = &intrinsic.name; @@ -394,7 +394,6 @@ path = "{intrinsic}/main.rs""#, /* If there has been a linker explicitly set from the command line then * we want to set it via setting it in the RUSTFLAGS*/ - let mut rust_flags = "-Cdebuginfo=0".to_string(); let cargo_command = format!( "cargo {toolchain} build --target {target} --release", @@ -403,12 +402,12 @@ path = "{intrinsic}/main.rs""#, ); let mut command = Command::new("sh"); - command .current_dir("rust_programs") .arg("-c") .arg(cargo_command); + let mut rust_flags = "-Cdebuginfo=0".to_string(); if let Some(linker) = linker { rust_flags.push_str(" -C linker="); rust_flags.push_str(linker); @@ -418,6 +417,7 @@ path = "{intrinsic}/main.rs""#, } command.env("RUSTFLAGS", rust_flags); + println!("{:?}", command); let output = command.output(); if let Ok(output) = output { @@ -552,8 +552,8 @@ fn main() { std::process::exit(3); } - if let Some(ref _toolchain) = toolchain { - if !compare_outputs(&intrinsics, &c_runner, target) { + if let Some(ref toolchain) = toolchain { + if !compare_outputs(&intrinsics, toolchain, &c_runner, target) { std::process::exit(1) } } @@ -565,7 +565,12 @@ enum FailureReason { Difference(String, String, String), } -fn compare_outputs(intrinsics: &Vec, runner: &str, target: &str) -> bool { +fn compare_outputs( + intrinsics: &Vec, + toolchain: &str, + runner: &str, + target: &str, +) -> bool { let intrinsics = intrinsics .par_iter() .filter_map(|intrinsic| { @@ -578,15 +583,29 @@ fn compare_outputs(intrinsics: &Vec, runner: &str, target: &str) -> b )) .output(); - let rust = Command::new("sh") - .arg("-c") - .arg(format!( - "{runner} ./rust_programs/target/{target}/release/{intrinsic}", - runner = runner, - target = target, - intrinsic = intrinsic.name, - )) - .output(); + let rust = if target != "aarch64_be-none-linux-gnu" { + Command::new("sh") + .current_dir("rust_programs") + .arg("-c") + .arg(format!( + "cargo {toolchain} run --target {target} --bin {intrinsic} --release", + intrinsic = intrinsic.name, + toolchain = toolchain, + target = target + )) + .env("RUSTFLAGS", "-Cdebuginfo=0") + .output() + } else { + Command::new("sh") + .arg("-c") + .arg(format!( + "{runner} ./rust_programs/target/{target}/release/{intrinsic}", + runner = runner, + target = target, + intrinsic = intrinsic.name, + )) + .output() + }; let (c, rust) = match (c, rust) { (Ok(c), Ok(rust)) => (c, rust), diff --git a/crates/intrinsic-test/src/types.rs b/crates/intrinsic-test/src/types.rs index 90559b5935..1eb44896f7 100644 --- a/crates/intrinsic-test/src/types.rs +++ b/crates/intrinsic-test/src/types.rs @@ -375,9 +375,9 @@ impl IntrinsicType { } /// Determines the load function for this type. - pub fn get_load_function(&self, target: &str) -> String { + pub fn get_load_function(&self, armv7_p64_workaround: bool) -> String { match self { - IntrinsicType::Ptr { child, .. } => child.get_load_function(target), + IntrinsicType::Ptr { child, .. } => child.get_load_function(armv7_p64_workaround), IntrinsicType::Type { kind: k, bit_len: Some(bl), @@ -397,7 +397,7 @@ impl IntrinsicType { TypeKind::Int => "s", TypeKind::Float => "f", // The ACLE doesn't support 64-bit polynomial loads on Armv7 - TypeKind::Poly => if target.starts_with("armv7") && *bl == 64 {"s"} else {"p"}, + TypeKind::Poly => if armv7_p64_workaround && *bl == 64 {"s"} else {"p"}, x => todo!("get_load_function TypeKind: {:#?}", x), }, size = bl, From 73c6e7fff517355c35553fe96776e32df151d938 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 11 Feb 2025 17:28:30 +0000 Subject: [PATCH 09/13] remove print statment and correct target name --- crates/intrinsic-test/src/main.rs | 3 +-- crates/stdarch-gen-arm/src/intrinsic.rs | 13 ++++++------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs index d1881e3a94..ce7b169c5a 100644 --- a/crates/intrinsic-test/src/main.rs +++ b/crates/intrinsic-test/src/main.rs @@ -417,7 +417,6 @@ path = "{intrinsic}/main.rs""#, } command.env("RUSTFLAGS", rust_flags); - println!("{:?}", command); let output = command.output(); if let Ok(output) = output { @@ -583,7 +582,7 @@ fn compare_outputs( )) .output(); - let rust = if target != "aarch64_be-none-linux-gnu" { + let rust = if target != "aarch64_be-unknown-linux-gnu" { Command::new("sh") .current_dir("rust_programs") .arg("-c") diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index 0101423f1a..5d17a7aed6 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -1721,6 +1721,12 @@ fn create_tokens(intrinsic: &Intrinsic, endianness: Endianness, tokens: &mut Tok Endianness::NA => {} }; + let expressions = match endianness { + Endianness::Little | Endianness::NA => &intrinsic.compose, + Endianness::Big => &intrinsic.big_endian_compose, + }; + + /* If we have manually defined attributes on the block of yaml with * 'attr:' we want to add them */ if let Some(attr) = &intrinsic.attr { @@ -1767,13 +1773,6 @@ fn create_tokens(intrinsic: &Intrinsic, endianness: Endianness, tokens: &mut Tok } tokens.append_all(quote! { #signature }); - let expressions = match endianness { - Endianness::Little | Endianness::NA => &intrinsic.compose, - Endianness::Big => &intrinsic.big_endian_compose, - }; - - tokens.append_all(quote! { #signature }); - // If the intrinsic function is explicitly unsafe, we populate `body_default_safety` with // the implementation. No explicit unsafe blocks are required. // From 6f97ced494b20b69d0c4c7ef2e18b82e90909d0e Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 11 Feb 2025 17:35:39 +0000 Subject: [PATCH 10/13] fix up yaml & re-generate --- .../core_arch/src/aarch64/neon/generated.rs | 4325 ++-------- .../src/arm_shared/neon/generated.rs | 7327 ++++------------- crates/core_arch/src/arm_shared/neon/mod.rs | 560 +- crates/intrinsic-test/src/main.rs | 4 +- .../spec/neon/arm_shared.spec.yml | 6 +- crates/stdarch-gen-arm/src/intrinsic.rs | 1 - 6 files changed, 2405 insertions(+), 9818 deletions(-) diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs index 7f1f737328..268a774751 100644 --- a/crates/core_arch/src/aarch64/neon/generated.rs +++ b/crates/core_arch/src/aarch64/neon/generated.rs @@ -1,9 +1,9 @@ // This code is automatically generated. DO NOT MODIFY. // -// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: // // ``` -// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec // ``` #![allow(improper_ctypes)] @@ -22,7 +22,7 @@ use super::*; #[cfg_attr(test, assert_instr(crc32cx))] #[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32cx" @@ -31,7 +31,6 @@ pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { } ___crc32cd(crc.as_signed(), data.as_signed()).as_unsigned() } - #[doc = "CRC32 single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] #[doc = "## Safety"] @@ -42,7 +41,7 @@ pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { #[cfg_attr(test, assert_instr(crc32x))] #[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32x" @@ -51,7 +50,6 @@ pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { } ___crc32d(crc.as_signed(), data.as_signed()).as_unsigned() } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"] #[doc = "## Safety"] @@ -68,7 +66,6 @@ pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 let f: uint8x8_t = simd_cast(f); simd_add(a, simd_cast(f)) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"] #[doc = "## Safety"] @@ -89,7 +86,6 @@ pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 let ret_val: int16x8_t = simd_add(a, simd_cast(f)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"] #[doc = "## Safety"] @@ -106,7 +102,6 @@ pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x let f: uint16x4_t = simd_cast(f); simd_add(a, simd_cast(f)) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"] #[doc = "## Safety"] @@ -127,7 +122,6 @@ pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x let ret_val: int32x4_t = simd_add(a, simd_cast(f)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"] #[doc = "## Safety"] @@ -144,7 +138,6 @@ pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let f: uint32x2_t = simd_cast(f); simd_add(a, simd_cast(f)) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"] #[doc = "## Safety"] @@ -165,7 +158,6 @@ pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let ret_val: int64x2_t = simd_add(a, simd_cast(f)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"] #[doc = "## Safety"] @@ -181,7 +173,6 @@ pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint let f: uint8x8_t = vabd_u8(d, e); simd_add(a, simd_cast(f)) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"] #[doc = "## Safety"] @@ -201,7 +192,6 @@ pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint let ret_val: uint16x8_t = simd_add(a, simd_cast(f)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"] #[doc = "## Safety"] @@ -217,7 +207,6 @@ pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin let f: uint16x4_t = vabd_u16(d, e); simd_add(a, simd_cast(f)) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"] #[doc = "## Safety"] @@ -237,7 +226,6 @@ pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin let ret_val: uint32x4_t = simd_add(a, simd_cast(f)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"] #[doc = "## Safety"] @@ -253,7 +241,6 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let f: uint32x2_t = vabd_u32(d, e); simd_add(a, simd_cast(f)) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"] #[doc = "## Safety"] @@ -273,7 +260,6 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let ret_val: uint64x2_t = simd_add(a, simd_cast(f)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"] #[doc = "## Safety"] @@ -283,7 +269,7 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fabd.v1f64" @@ -292,7 +278,6 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vabd_f64(a, b) } - #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"] #[doc = "## Safety"] @@ -303,7 +288,7 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fabd.v2f64" @@ -312,7 +297,6 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vabdq_f64(a, b) } - #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"] #[doc = "## Safety"] @@ -323,7 +307,7 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fabd.v2f64" @@ -335,7 +319,6 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vabdq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"] #[doc = "## Safety"] @@ -347,7 +330,6 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } - #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"] #[doc = "## Safety"] @@ -359,7 +341,6 @@ pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 { pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"] #[doc = "## Safety"] @@ -375,7 +356,6 @@ pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let e: uint16x4_t = simd_cast(vabd_s16(c, d)); simd_cast(e) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"] #[doc = "## Safety"] @@ -394,7 +374,6 @@ pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = simd_cast(e); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"] #[doc = "## Safety"] @@ -410,7 +389,6 @@ pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let e: uint32x2_t = simd_cast(vabd_s32(c, d)); simd_cast(e) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"] #[doc = "## Safety"] @@ -429,7 +407,6 @@ pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = simd_cast(e); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"] #[doc = "## Safety"] @@ -445,7 +422,6 @@ pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let e: uint8x8_t = simd_cast(vabd_s8(c, d)); simd_cast(e) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"] #[doc = "## Safety"] @@ -464,7 +440,6 @@ pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = simd_cast(e); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] #[doc = "## Safety"] @@ -479,7 +454,6 @@ pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); simd_cast(vabd_u8(c, d)) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] #[doc = "## Safety"] @@ -497,7 +471,6 @@ pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_cast(vabd_u8(c, d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] #[doc = "## Safety"] @@ -512,7 +485,6 @@ pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); simd_cast(vabd_u16(c, d)) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] #[doc = "## Safety"] @@ -530,7 +502,6 @@ pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_cast(vabd_u16(c, d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] #[doc = "## Safety"] @@ -545,7 +516,6 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); simd_cast(vabd_u32(c, d)) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] #[doc = "## Safety"] @@ -563,7 +533,6 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_cast(vabd_u32(c, d)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"] #[doc = "## Safety"] @@ -575,7 +544,6 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t { simd_fabs(a) } - #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"] #[doc = "## Safety"] @@ -588,7 +556,6 @@ pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t { pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t { simd_fabs(a) } - #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"] #[doc = "## Safety"] @@ -603,7 +570,6 @@ pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_fabs(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"] #[doc = "## Safety"] @@ -613,7 +579,7 @@ pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v1i64" @@ -622,7 +588,6 @@ pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t { } _vabs_s64(a) } - #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"] #[doc = "## Safety"] @@ -632,7 +597,7 @@ pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] pub unsafe fn vabsd_s64(a: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.i64" @@ -641,7 +606,6 @@ pub unsafe fn vabsd_s64(a: i64) -> i64 { } _vabsd_s64(a) } - #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] #[doc = "## Safety"] @@ -652,7 +616,7 @@ pub unsafe fn vabsd_s64(a: i64) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v2i64" @@ -661,7 +625,6 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { } _vabsq_s64(a) } - #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] #[doc = "## Safety"] @@ -672,7 +635,7 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v2i64" @@ -683,7 +646,6 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vabsq_s64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"] #[doc = "## Safety"] @@ -695,7 +657,6 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { a.wrapping_add(b) } - #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"] #[doc = "## Safety"] @@ -707,7 +668,6 @@ pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 { pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { a.wrapping_add(b) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"] #[doc = "## Safety"] @@ -718,7 +678,7 @@ pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v4i16" @@ -727,7 +687,6 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { } _vaddlv_s16(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"] #[doc = "## Safety"] @@ -738,7 +697,7 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v4i16" @@ -748,7 +707,6 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlv_s16(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"] #[doc = "## Safety"] @@ -759,7 +717,7 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v8i16" @@ -768,7 +726,6 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { } _vaddlvq_s16(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"] #[doc = "## Safety"] @@ -779,7 +736,7 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v8i16" @@ -789,7 +746,6 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlvq_s16(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"] #[doc = "## Safety"] @@ -800,7 +756,7 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v4i32" @@ -809,7 +765,6 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { } _vaddlvq_s32(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"] #[doc = "## Safety"] @@ -820,7 +775,7 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v4i32" @@ -830,7 +785,6 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlvq_s32(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"] #[doc = "## Safety"] @@ -841,7 +795,7 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlp))] pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v2i32" @@ -850,7 +804,6 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { } _vaddlv_s32(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"] #[doc = "## Safety"] @@ -861,7 +814,7 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlp))] pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v2i32" @@ -871,7 +824,6 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddlv_s32(a) } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"] #[doc = "## Safety"] @@ -882,7 +834,7 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v8i8" @@ -891,7 +843,6 @@ pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { } _vaddlv_s8(a) as i16 } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"] #[doc = "## Safety"] @@ -902,7 +853,7 @@ pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v8i8" @@ -912,7 +863,6 @@ pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlv_s8(a) as i16 } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"] #[doc = "## Safety"] @@ -923,7 +873,7 @@ pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v16i8" @@ -932,7 +882,6 @@ pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { } _vaddlvq_s8(a) as i16 } - #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"] #[doc = "## Safety"] @@ -943,7 +892,7 @@ pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v16i8" @@ -953,7 +902,6 @@ pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vaddlvq_s8(a) as i16 } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"] #[doc = "## Safety"] @@ -964,7 +912,7 @@ pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" @@ -973,7 +921,6 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { } _vaddlv_u16(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"] #[doc = "## Safety"] @@ -984,7 +931,7 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" @@ -994,7 +941,6 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlv_u16(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] #[doc = "## Safety"] @@ -1005,7 +951,7 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" @@ -1014,7 +960,6 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { } _vaddlvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] #[doc = "## Safety"] @@ -1025,7 +970,7 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" @@ -1035,7 +980,6 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] #[doc = "## Safety"] @@ -1046,7 +990,7 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" @@ -1055,7 +999,6 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { } _vaddlvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] #[doc = "## Safety"] @@ -1066,7 +1009,7 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" @@ -1076,7 +1019,6 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] #[doc = "## Safety"] @@ -1087,7 +1029,7 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlp))] pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" @@ -1096,7 +1038,6 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { } _vaddlv_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] #[doc = "## Safety"] @@ -1107,7 +1048,7 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlp))] pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" @@ -1117,7 +1058,6 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddlv_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"] #[doc = "## Safety"] @@ -1128,7 +1068,7 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8" @@ -1137,7 +1077,6 @@ pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { } _vaddlv_u8(a.as_signed()).as_unsigned() as u16 } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"] #[doc = "## Safety"] @@ -1148,7 +1087,7 @@ pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8" @@ -1158,7 +1097,6 @@ pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlv_u8(a.as_signed()).as_unsigned() as u16 } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"] #[doc = "## Safety"] @@ -1169,7 +1107,7 @@ pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8" @@ -1178,7 +1116,6 @@ pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { } _vaddlvq_u8(a.as_signed()).as_unsigned() as u16 } - #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"] #[doc = "## Safety"] @@ -1189,7 +1126,7 @@ pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8" @@ -1199,7 +1136,6 @@ pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vaddlvq_u8(a.as_signed()).as_unsigned() as u16 } - #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] #[doc = "## Safety"] @@ -1210,7 +1146,7 @@ pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v2f32" @@ -1219,7 +1155,6 @@ pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { } _vaddv_f32(a) } - #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] #[doc = "## Safety"] @@ -1230,7 +1165,7 @@ pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v2f32" @@ -1240,7 +1175,6 @@ pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddv_f32(a) } - #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] #[doc = "## Safety"] @@ -1251,7 +1185,7 @@ pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v4f32" @@ -1260,7 +1194,6 @@ pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { } _vaddvq_f32(a) } - #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] #[doc = "## Safety"] @@ -1271,7 +1204,7 @@ pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v4f32" @@ -1281,7 +1214,6 @@ pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddvq_f32(a) } - #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] #[doc = "## Safety"] @@ -1292,7 +1224,7 @@ pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f64.v2f64" @@ -1301,7 +1233,6 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { } _vaddvq_f64(a) } - #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] #[doc = "## Safety"] @@ -1312,7 +1243,7 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f64.v2f64" @@ -1322,7 +1253,6 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vaddvq_f64(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"] #[doc = "## Safety"] @@ -1333,7 +1263,7 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v2i32" @@ -1342,7 +1272,6 @@ pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { } _vaddv_s32(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"] #[doc = "## Safety"] @@ -1353,7 +1282,7 @@ pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v2i32" @@ -1363,7 +1292,6 @@ pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddv_s32(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"] #[doc = "## Safety"] @@ -1374,7 +1302,7 @@ pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v8i8" @@ -1383,7 +1311,6 @@ pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { } _vaddv_s8(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"] #[doc = "## Safety"] @@ -1394,7 +1321,7 @@ pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v8i8" @@ -1404,7 +1331,6 @@ pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddv_s8(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"] #[doc = "## Safety"] @@ -1415,7 +1341,7 @@ pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v16i8" @@ -1424,7 +1350,6 @@ pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { } _vaddvq_s8(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"] #[doc = "## Safety"] @@ -1435,7 +1360,7 @@ pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v16i8" @@ -1445,7 +1370,6 @@ pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vaddvq_s8(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"] #[doc = "## Safety"] @@ -1456,7 +1380,7 @@ pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v4i16" @@ -1465,7 +1389,6 @@ pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { } _vaddv_s16(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"] #[doc = "## Safety"] @@ -1476,7 +1399,7 @@ pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v4i16" @@ -1486,7 +1409,6 @@ pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddv_s16(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"] #[doc = "## Safety"] @@ -1497,7 +1419,7 @@ pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v8i16" @@ -1506,7 +1428,6 @@ pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { } _vaddvq_s16(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"] #[doc = "## Safety"] @@ -1517,7 +1438,7 @@ pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v8i16" @@ -1527,7 +1448,6 @@ pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddvq_s16(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"] #[doc = "## Safety"] @@ -1538,7 +1458,7 @@ pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v4i32" @@ -1547,7 +1467,6 @@ pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { } _vaddvq_s32(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"] #[doc = "## Safety"] @@ -1558,7 +1477,7 @@ pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i32.v4i32" @@ -1568,7 +1487,6 @@ pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddvq_s32(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"] #[doc = "## Safety"] @@ -1579,7 +1497,7 @@ pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v2i32" @@ -1588,7 +1506,6 @@ pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { } _vaddv_u32(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"] #[doc = "## Safety"] @@ -1599,7 +1516,7 @@ pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v2i32" @@ -1609,7 +1526,6 @@ pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddv_u32(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] #[doc = "## Safety"] @@ -1620,7 +1536,7 @@ pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v8i8" @@ -1629,7 +1545,6 @@ pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { } _vaddv_u8(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] #[doc = "## Safety"] @@ -1640,7 +1555,7 @@ pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v8i8" @@ -1650,7 +1565,6 @@ pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddv_u8(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] #[doc = "## Safety"] @@ -1661,7 +1575,7 @@ pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v16i8" @@ -1670,7 +1584,6 @@ pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { } _vaddvq_u8(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] #[doc = "## Safety"] @@ -1681,7 +1594,7 @@ pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v16i8" @@ -1691,7 +1604,6 @@ pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vaddvq_u8(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] #[doc = "## Safety"] @@ -1702,7 +1614,7 @@ pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v4i16" @@ -1711,7 +1623,6 @@ pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { } _vaddv_u16(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] #[doc = "## Safety"] @@ -1722,7 +1633,7 @@ pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v4i16" @@ -1732,7 +1643,6 @@ pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddv_u16(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] #[doc = "## Safety"] @@ -1743,7 +1653,7 @@ pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v8i16" @@ -1752,7 +1662,6 @@ pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { } _vaddvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] #[doc = "## Safety"] @@ -1763,7 +1672,7 @@ pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v8i16" @@ -1773,7 +1682,6 @@ pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] #[doc = "## Safety"] @@ -1784,7 +1692,7 @@ pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v4i32" @@ -1793,7 +1701,6 @@ pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { } _vaddvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] #[doc = "## Safety"] @@ -1804,7 +1711,7 @@ pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v4i32" @@ -1814,7 +1721,6 @@ pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] #[doc = "## Safety"] @@ -1825,7 +1731,7 @@ pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i64.v2i64" @@ -1834,7 +1740,6 @@ pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { } _vaddvq_s64(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] #[doc = "## Safety"] @@ -1845,7 +1750,7 @@ pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddv.i64.v2i64" @@ -1855,7 +1760,6 @@ pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); _vaddvq_s64(a) } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"] #[doc = "## Safety"] @@ -1866,7 +1770,7 @@ pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i64.v2i64" @@ -1875,7 +1779,6 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { } _vaddvq_u64(a.as_signed()).as_unsigned() } - #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"] #[doc = "## Safety"] @@ -1886,7 +1789,7 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i64.v2i64" @@ -1896,7 +1799,6 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); _vaddvq_u64(a.as_signed()).as_unsigned() } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] #[doc = "## Safety"] @@ -1907,7 +1809,7 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v16i8" @@ -1916,7 +1818,6 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } _vbcaxq_s8(a, b, c) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] #[doc = "## Safety"] @@ -1927,7 +1828,7 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v16i8" @@ -1944,7 +1845,6 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] #[doc = "## Safety"] @@ -1955,7 +1855,7 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v8i16" @@ -1964,7 +1864,6 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t } _vbcaxq_s16(a, b, c) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] #[doc = "## Safety"] @@ -1975,7 +1874,7 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v8i16" @@ -1988,7 +1887,6 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t let ret_val: int16x8_t = _vbcaxq_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] #[doc = "## Safety"] @@ -1999,7 +1897,7 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v4i32" @@ -2008,7 +1906,6 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t } _vbcaxq_s32(a, b, c) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] #[doc = "## Safety"] @@ -2019,7 +1916,7 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v4i32" @@ -2032,7 +1929,6 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t let ret_val: int32x4_t = _vbcaxq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] #[doc = "## Safety"] @@ -2043,7 +1939,7 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v2i64" @@ -2052,7 +1948,6 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t } _vbcaxq_s64(a, b, c) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] #[doc = "## Safety"] @@ -2063,7 +1958,7 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v2i64" @@ -2076,7 +1971,6 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t let ret_val: int64x2_t = _vbcaxq_s64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] #[doc = "## Safety"] @@ -2087,7 +1981,7 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v16i8" @@ -2096,7 +1990,6 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 } _vbcaxq_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] #[doc = "## Safety"] @@ -2107,7 +2000,7 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v16i8" @@ -2124,7 +2017,6 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] #[doc = "## Safety"] @@ -2135,7 +2027,7 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v8i16" @@ -2144,7 +2036,6 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x } _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] #[doc = "## Safety"] @@ -2155,7 +2046,7 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v8i16" @@ -2169,7 +2060,6 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] #[doc = "## Safety"] @@ -2180,7 +2070,7 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v4i32" @@ -2189,7 +2079,6 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x } _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] #[doc = "## Safety"] @@ -2200,7 +2089,7 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v4i32" @@ -2214,7 +2103,6 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] #[doc = "## Safety"] @@ -2225,7 +2113,7 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v2i64" @@ -2234,7 +2122,6 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x } _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] #[doc = "## Safety"] @@ -2245,7 +2132,7 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v2i64" @@ -2259,7 +2146,6 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] #[doc = "## Safety"] @@ -2270,7 +2156,7 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32" @@ -2279,7 +2165,6 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vcadd_rot270_f32(a, b) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] #[doc = "## Safety"] @@ -2290,7 +2175,7 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32" @@ -2302,7 +2187,6 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vcadd_rot270_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] #[doc = "## Safety"] @@ -2313,7 +2197,7 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" @@ -2322,7 +2206,6 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vcaddq_rot270_f32(a, b) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] #[doc = "## Safety"] @@ -2333,7 +2216,7 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" @@ -2345,7 +2228,6 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vcaddq_rot270_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] #[doc = "## Safety"] @@ -2356,7 +2238,7 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" @@ -2365,7 +2247,6 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vcaddq_rot270_f64(a, b) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] #[doc = "## Safety"] @@ -2376,7 +2257,7 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" @@ -2388,7 +2269,6 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vcaddq_rot270_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] #[doc = "## Safety"] @@ -2399,7 +2279,7 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" @@ -2408,7 +2288,6 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vcadd_rot90_f32(a, b) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] #[doc = "## Safety"] @@ -2419,7 +2298,7 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" @@ -2431,7 +2310,6 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vcadd_rot90_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] #[doc = "## Safety"] @@ -2442,7 +2320,7 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" @@ -2451,7 +2329,6 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vcaddq_rot90_f32(a, b) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] #[doc = "## Safety"] @@ -2462,7 +2339,7 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" @@ -2474,7 +2351,6 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vcaddq_rot90_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] #[doc = "## Safety"] @@ -2485,7 +2361,7 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" @@ -2494,7 +2370,6 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vcaddq_rot90_f64(a, b) } - #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] #[doc = "## Safety"] @@ -2505,7 +2380,7 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" @@ -2517,7 +2392,6 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vcaddq_rot90_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"] #[doc = "## Safety"] @@ -2527,7 +2401,7 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v1i64.v1f64" @@ -2536,7 +2410,6 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } _vcage_f64(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] #[doc = "## Safety"] @@ -2547,7 +2420,7 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" @@ -2556,7 +2429,6 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } _vcageq_f64(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] #[doc = "## Safety"] @@ -2567,7 +2439,7 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" @@ -2579,7 +2451,6 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcageq_f64(a, b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"] #[doc = "## Safety"] @@ -2589,7 +2460,7 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i64.f64" @@ -2598,7 +2469,6 @@ pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { } _vcaged_f64(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"] #[doc = "## Safety"] @@ -2608,7 +2478,7 @@ pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i32.f32" @@ -2617,7 +2487,6 @@ pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { } _vcages_f32(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"] #[doc = "## Safety"] @@ -2627,7 +2496,7 @@ pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64" @@ -2636,7 +2505,6 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } _vcagt_f64(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] #[doc = "## Safety"] @@ -2647,7 +2515,7 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" @@ -2656,7 +2524,6 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } _vcagtq_f64(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] #[doc = "## Safety"] @@ -2667,7 +2534,7 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" @@ -2679,7 +2546,6 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcagtq_f64(a, b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"] #[doc = "## Safety"] @@ -2689,7 +2555,7 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i64.f64" @@ -2698,7 +2564,6 @@ pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { } _vcagtd_f64(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"] #[doc = "## Safety"] @@ -2708,7 +2573,7 @@ pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i32.f32" @@ -2717,7 +2582,6 @@ pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { } _vcagts_f32(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"] #[doc = "## Safety"] @@ -2729,7 +2593,6 @@ pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { vcage_f64(b, a) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"] #[doc = "## Safety"] @@ -2742,7 +2605,6 @@ pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { vcageq_f64(b, a) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"] #[doc = "## Safety"] @@ -2758,7 +2620,6 @@ pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = vcageq_f64(b, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"] #[doc = "## Safety"] @@ -2770,7 +2631,6 @@ pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 { vcaged_f64(b, a) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"] #[doc = "## Safety"] @@ -2782,7 +2642,6 @@ pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 { pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 { vcages_f32(b, a) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"] #[doc = "## Safety"] @@ -2794,7 +2653,6 @@ pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 { pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { vcagt_f64(b, a) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"] #[doc = "## Safety"] @@ -2807,7 +2665,6 @@ pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { vcagtq_f64(b, a) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"] #[doc = "## Safety"] @@ -2823,7 +2680,6 @@ pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = vcagtq_f64(b, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"] #[doc = "## Safety"] @@ -2835,7 +2691,6 @@ pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 { vcagtd_f64(b, a) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"] #[doc = "## Safety"] @@ -2847,7 +2702,6 @@ pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 { pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 { vcagts_f32(b, a) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"] #[doc = "## Safety"] @@ -2859,7 +2713,6 @@ pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 { pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { simd_eq(a, b) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"] #[doc = "## Safety"] @@ -2872,7 +2725,6 @@ pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_eq(a, b) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"] #[doc = "## Safety"] @@ -2888,7 +2740,6 @@ pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"] #[doc = "## Safety"] @@ -2900,7 +2751,6 @@ pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"] #[doc = "## Safety"] @@ -2913,7 +2763,6 @@ pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"] #[doc = "## Safety"] @@ -2929,7 +2778,6 @@ pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"] #[doc = "## Safety"] @@ -2941,7 +2789,6 @@ pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"] #[doc = "## Safety"] @@ -2954,7 +2801,6 @@ pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"] #[doc = "## Safety"] @@ -2970,7 +2816,6 @@ pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"] #[doc = "## Safety"] @@ -2982,7 +2827,6 @@ pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"] #[doc = "## Safety"] @@ -2995,7 +2839,6 @@ pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"] #[doc = "## Safety"] @@ -3011,7 +2854,6 @@ pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"] #[doc = "## Safety"] @@ -3023,7 +2865,6 @@ pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"] #[doc = "## Safety"] @@ -3035,7 +2876,6 @@ pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 { pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } - #[doc = "Compare bitwise equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"] #[doc = "## Safety"] @@ -3047,7 +2887,6 @@ pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 { pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 { transmute(vceq_s64(transmute(a), transmute(b))) } - #[doc = "Compare bitwise equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"] #[doc = "## Safety"] @@ -3059,7 +2898,6 @@ pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 { pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 { transmute(vceq_u64(transmute(a), transmute(b))) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"] #[doc = "## Safety"] @@ -3073,7 +2911,6 @@ pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t { let b: f32x2 = f32x2::new(0.0, 0.0); simd_eq(a, transmute(b)) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"] #[doc = "## Safety"] @@ -3089,7 +2926,6 @@ pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] #[doc = "## Safety"] @@ -3103,7 +2939,6 @@ pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); simd_eq(a, transmute(b)) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] #[doc = "## Safety"] @@ -3119,7 +2954,6 @@ pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"] #[doc = "## Safety"] @@ -3132,7 +2966,6 @@ pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t { let b: f64 = 0.0; simd_eq(a, transmute(b)) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"] #[doc = "## Safety"] @@ -3146,7 +2979,6 @@ pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_eq(a, transmute(b)) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"] #[doc = "## Safety"] @@ -3162,7 +2994,6 @@ pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] #[doc = "## Safety"] @@ -3176,7 +3007,6 @@ pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t { let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] #[doc = "## Safety"] @@ -3192,7 +3022,6 @@ pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] #[doc = "## Safety"] @@ -3206,7 +3035,6 @@ pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] #[doc = "## Safety"] @@ -3226,7 +3054,6 @@ pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] #[doc = "## Safety"] @@ -3240,7 +3067,6 @@ pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t { let b: i16x4 = i16x4::new(0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] #[doc = "## Safety"] @@ -3256,7 +3082,6 @@ pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] #[doc = "## Safety"] @@ -3270,7 +3095,6 @@ pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] #[doc = "## Safety"] @@ -3286,7 +3110,6 @@ pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] #[doc = "## Safety"] @@ -3300,7 +3123,6 @@ pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t { let b: i32x2 = i32x2::new(0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] #[doc = "## Safety"] @@ -3316,7 +3138,6 @@ pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] #[doc = "## Safety"] @@ -3330,7 +3151,6 @@ pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t { let b: i32x4 = i32x4::new(0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] #[doc = "## Safety"] @@ -3346,7 +3166,6 @@ pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"] #[doc = "## Safety"] @@ -3359,7 +3178,6 @@ pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t { let b: i64x1 = i64x1::new(0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"] #[doc = "## Safety"] @@ -3373,7 +3191,6 @@ pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"] #[doc = "## Safety"] @@ -3389,7 +3206,6 @@ pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] #[doc = "## Safety"] @@ -3403,7 +3219,6 @@ pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] #[doc = "## Safety"] @@ -3419,7 +3234,6 @@ pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] #[doc = "## Safety"] @@ -3433,7 +3247,6 @@ pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] #[doc = "## Safety"] @@ -3453,7 +3266,6 @@ pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"] #[doc = "## Safety"] @@ -3466,7 +3278,6 @@ pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t { let b: i64x1 = i64x1::new(0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"] #[doc = "## Safety"] @@ -3480,7 +3291,6 @@ pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_eq(a, transmute(b)) } - #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"] #[doc = "## Safety"] @@ -3496,7 +3306,6 @@ pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] #[doc = "## Safety"] @@ -3510,7 +3319,6 @@ pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] #[doc = "## Safety"] @@ -3526,7 +3334,6 @@ pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] #[doc = "## Safety"] @@ -3540,7 +3347,6 @@ pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] #[doc = "## Safety"] @@ -3560,7 +3366,6 @@ pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] #[doc = "## Safety"] @@ -3574,7 +3379,6 @@ pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { let b: u16x4 = u16x4::new(0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] #[doc = "## Safety"] @@ -3590,7 +3394,6 @@ pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] #[doc = "## Safety"] @@ -3604,7 +3407,6 @@ pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] #[doc = "## Safety"] @@ -3620,7 +3422,6 @@ pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] #[doc = "## Safety"] @@ -3634,7 +3435,6 @@ pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { let b: u32x2 = u32x2::new(0, 0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] #[doc = "## Safety"] @@ -3650,7 +3450,6 @@ pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] #[doc = "## Safety"] @@ -3664,7 +3463,6 @@ pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { let b: u32x4 = u32x4::new(0, 0, 0, 0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] #[doc = "## Safety"] @@ -3680,7 +3478,6 @@ pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"] #[doc = "## Safety"] @@ -3693,7 +3490,6 @@ pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t { let b: u64x1 = u64x1::new(0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] #[doc = "## Safety"] @@ -3707,7 +3503,6 @@ pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { let b: u64x2 = u64x2::new(0, 0); simd_eq(a, transmute(b)) } - #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] #[doc = "## Safety"] @@ -3723,7 +3518,6 @@ pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_eq(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"] #[doc = "## Safety"] @@ -3735,7 +3529,6 @@ pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { pub unsafe fn vceqzd_s64(a: i64) -> u64 { transmute(vceqz_s64(transmute(a))) } - #[doc = "Compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"] #[doc = "## Safety"] @@ -3747,7 +3540,6 @@ pub unsafe fn vceqzd_s64(a: i64) -> u64 { pub unsafe fn vceqzd_u64(a: u64) -> u64 { transmute(vceqz_u64(transmute(a))) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"] #[doc = "## Safety"] @@ -3759,7 +3551,6 @@ pub unsafe fn vceqzd_u64(a: u64) -> u64 { pub unsafe fn vceqzs_f32(a: f32) -> u32 { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) } - #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"] #[doc = "## Safety"] @@ -3771,7 +3562,6 @@ pub unsafe fn vceqzs_f32(a: f32) -> u32 { pub unsafe fn vceqzd_f64(a: f64) -> u64 { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"] #[doc = "## Safety"] @@ -3783,7 +3573,6 @@ pub unsafe fn vceqzd_f64(a: f64) -> u64 { pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { simd_ge(a, b) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"] #[doc = "## Safety"] @@ -3796,7 +3585,6 @@ pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_ge(a, b) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"] #[doc = "## Safety"] @@ -3812,7 +3600,6 @@ pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"] #[doc = "## Safety"] @@ -3824,7 +3611,6 @@ pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"] #[doc = "## Safety"] @@ -3837,7 +3623,6 @@ pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"] #[doc = "## Safety"] @@ -3853,7 +3638,6 @@ pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"] #[doc = "## Safety"] @@ -3865,7 +3649,6 @@ pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"] #[doc = "## Safety"] @@ -3878,7 +3661,6 @@ pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"] #[doc = "## Safety"] @@ -3894,7 +3676,6 @@ pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"] #[doc = "## Safety"] @@ -3906,7 +3687,6 @@ pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"] #[doc = "## Safety"] @@ -3918,7 +3698,6 @@ pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 { pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } - #[doc = "Compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"] #[doc = "## Safety"] @@ -3930,7 +3709,6 @@ pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 { pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 { transmute(vcge_s64(transmute(a), transmute(b))) } - #[doc = "Compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"] #[doc = "## Safety"] @@ -3942,7 +3720,6 @@ pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 { pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 { transmute(vcge_u64(transmute(a), transmute(b))) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"] #[doc = "## Safety"] @@ -3956,7 +3733,6 @@ pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t { let b: f32x2 = f32x2::new(0.0, 0.0); simd_ge(a, transmute(b)) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"] #[doc = "## Safety"] @@ -3972,7 +3748,6 @@ pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] #[doc = "## Safety"] @@ -3986,7 +3761,6 @@ pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); simd_ge(a, transmute(b)) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] #[doc = "## Safety"] @@ -4002,7 +3776,6 @@ pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"] #[doc = "## Safety"] @@ -4015,7 +3788,6 @@ pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t { let b: f64 = 0.0; simd_ge(a, transmute(b)) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"] #[doc = "## Safety"] @@ -4029,7 +3801,6 @@ pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_ge(a, transmute(b)) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"] #[doc = "## Safety"] @@ -4045,7 +3816,6 @@ pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] #[doc = "## Safety"] @@ -4059,7 +3829,6 @@ pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t { let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] #[doc = "## Safety"] @@ -4075,7 +3844,6 @@ pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] #[doc = "## Safety"] @@ -4089,7 +3857,6 @@ pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] #[doc = "## Safety"] @@ -4109,7 +3876,6 @@ pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] #[doc = "## Safety"] @@ -4123,7 +3889,6 @@ pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t { let b: i16x4 = i16x4::new(0, 0, 0, 0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] #[doc = "## Safety"] @@ -4139,7 +3904,6 @@ pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] #[doc = "## Safety"] @@ -4153,7 +3917,6 @@ pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] #[doc = "## Safety"] @@ -4169,7 +3932,6 @@ pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] #[doc = "## Safety"] @@ -4183,7 +3945,6 @@ pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t { let b: i32x2 = i32x2::new(0, 0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] #[doc = "## Safety"] @@ -4199,7 +3960,6 @@ pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] #[doc = "## Safety"] @@ -4213,7 +3973,6 @@ pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { let b: i32x4 = i32x4::new(0, 0, 0, 0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] #[doc = "## Safety"] @@ -4229,7 +3988,6 @@ pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"] #[doc = "## Safety"] @@ -4242,7 +4000,6 @@ pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t { let b: i64x1 = i64x1::new(0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"] #[doc = "## Safety"] @@ -4256,7 +4013,6 @@ pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_ge(a, transmute(b)) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"] #[doc = "## Safety"] @@ -4272,7 +4028,6 @@ pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ge(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"] #[doc = "## Safety"] @@ -4284,7 +4039,6 @@ pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { pub unsafe fn vcgezd_f64(a: f64) -> u64 { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) } - #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"] #[doc = "## Safety"] @@ -4296,7 +4050,6 @@ pub unsafe fn vcgezd_f64(a: f64) -> u64 { pub unsafe fn vcgezs_f32(a: f32) -> u32 { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) } - #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"] #[doc = "## Safety"] @@ -4308,7 +4061,6 @@ pub unsafe fn vcgezs_f32(a: f32) -> u32 { pub unsafe fn vcgezd_s64(a: i64) -> u64 { transmute(vcgez_s64(transmute(a))) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"] #[doc = "## Safety"] @@ -4320,7 +4072,6 @@ pub unsafe fn vcgezd_s64(a: i64) -> u64 { pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { simd_gt(a, b) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"] #[doc = "## Safety"] @@ -4333,7 +4084,6 @@ pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_gt(a, b) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"] #[doc = "## Safety"] @@ -4349,7 +4099,6 @@ pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"] #[doc = "## Safety"] @@ -4361,7 +4110,6 @@ pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"] #[doc = "## Safety"] @@ -4374,7 +4122,6 @@ pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"] #[doc = "## Safety"] @@ -4390,7 +4137,6 @@ pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"] #[doc = "## Safety"] @@ -4402,7 +4148,6 @@ pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"] #[doc = "## Safety"] @@ -4415,7 +4160,6 @@ pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"] #[doc = "## Safety"] @@ -4431,7 +4175,6 @@ pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"] #[doc = "## Safety"] @@ -4443,7 +4186,6 @@ pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"] #[doc = "## Safety"] @@ -4455,7 +4197,6 @@ pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 { pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } - #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"] #[doc = "## Safety"] @@ -4467,7 +4208,6 @@ pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 { pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 { transmute(vcgt_s64(transmute(a), transmute(b))) } - #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"] #[doc = "## Safety"] @@ -4479,7 +4219,6 @@ pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 { pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 { transmute(vcgt_u64(transmute(a), transmute(b))) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"] #[doc = "## Safety"] @@ -4493,7 +4232,6 @@ pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { let b: f32x2 = f32x2::new(0.0, 0.0); simd_gt(a, transmute(b)) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"] #[doc = "## Safety"] @@ -4509,7 +4247,6 @@ pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] #[doc = "## Safety"] @@ -4523,7 +4260,6 @@ pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); simd_gt(a, transmute(b)) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] #[doc = "## Safety"] @@ -4539,7 +4275,6 @@ pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"] #[doc = "## Safety"] @@ -4552,7 +4287,6 @@ pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t { let b: f64 = 0.0; simd_gt(a, transmute(b)) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"] #[doc = "## Safety"] @@ -4566,7 +4300,6 @@ pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_gt(a, transmute(b)) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"] #[doc = "## Safety"] @@ -4582,7 +4315,6 @@ pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] #[doc = "## Safety"] @@ -4596,7 +4328,6 @@ pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] #[doc = "## Safety"] @@ -4612,7 +4343,6 @@ pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] #[doc = "## Safety"] @@ -4626,7 +4356,6 @@ pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] #[doc = "## Safety"] @@ -4646,7 +4375,6 @@ pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] #[doc = "## Safety"] @@ -4660,7 +4388,6 @@ pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { let b: i16x4 = i16x4::new(0, 0, 0, 0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] #[doc = "## Safety"] @@ -4676,7 +4403,6 @@ pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] #[doc = "## Safety"] @@ -4690,7 +4416,6 @@ pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] #[doc = "## Safety"] @@ -4706,7 +4431,6 @@ pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] #[doc = "## Safety"] @@ -4720,7 +4444,6 @@ pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { let b: i32x2 = i32x2::new(0, 0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] #[doc = "## Safety"] @@ -4736,7 +4459,6 @@ pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] #[doc = "## Safety"] @@ -4750,7 +4472,6 @@ pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { let b: i32x4 = i32x4::new(0, 0, 0, 0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] #[doc = "## Safety"] @@ -4766,7 +4487,6 @@ pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"] #[doc = "## Safety"] @@ -4779,7 +4499,6 @@ pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t { let b: i64x1 = i64x1::new(0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"] #[doc = "## Safety"] @@ -4793,7 +4512,6 @@ pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_gt(a, transmute(b)) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"] #[doc = "## Safety"] @@ -4809,7 +4527,6 @@ pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_gt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"] #[doc = "## Safety"] @@ -4821,7 +4538,6 @@ pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { pub unsafe fn vcgtzd_f64(a: f64) -> u64 { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) } - #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"] #[doc = "## Safety"] @@ -4833,7 +4549,6 @@ pub unsafe fn vcgtzd_f64(a: f64) -> u64 { pub unsafe fn vcgtzs_f32(a: f32) -> u32 { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) } - #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"] #[doc = "## Safety"] @@ -4845,7 +4560,6 @@ pub unsafe fn vcgtzs_f32(a: f32) -> u32 { pub unsafe fn vcgtzd_s64(a: i64) -> u64 { transmute(vcgtz_s64(transmute(a))) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"] #[doc = "## Safety"] @@ -4857,7 +4571,6 @@ pub unsafe fn vcgtzd_s64(a: i64) -> u64 { pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { simd_le(a, b) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"] #[doc = "## Safety"] @@ -4870,7 +4583,6 @@ pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_le(a, b) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"] #[doc = "## Safety"] @@ -4886,7 +4598,6 @@ pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"] #[doc = "## Safety"] @@ -4898,7 +4609,6 @@ pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"] #[doc = "## Safety"] @@ -4911,7 +4621,6 @@ pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"] #[doc = "## Safety"] @@ -4927,7 +4636,6 @@ pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"] #[doc = "## Safety"] @@ -4939,7 +4647,6 @@ pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"] #[doc = "## Safety"] @@ -4952,7 +4659,6 @@ pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"] #[doc = "## Safety"] @@ -4968,7 +4674,6 @@ pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"] #[doc = "## Safety"] @@ -4980,7 +4685,6 @@ pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"] #[doc = "## Safety"] @@ -4992,7 +4696,6 @@ pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 { pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } - #[doc = "Compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"] #[doc = "## Safety"] @@ -5004,7 +4707,6 @@ pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 { pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 { transmute(vcle_u64(transmute(a), transmute(b))) } - #[doc = "Compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"] #[doc = "## Safety"] @@ -5016,7 +4718,6 @@ pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 { pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 { transmute(vcle_s64(transmute(a), transmute(b))) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"] #[doc = "## Safety"] @@ -5030,7 +4731,6 @@ pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t { let b: f32x2 = f32x2::new(0.0, 0.0); simd_le(a, transmute(b)) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"] #[doc = "## Safety"] @@ -5046,7 +4746,6 @@ pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] #[doc = "## Safety"] @@ -5060,7 +4759,6 @@ pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t { let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); simd_le(a, transmute(b)) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] #[doc = "## Safety"] @@ -5076,7 +4774,6 @@ pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"] #[doc = "## Safety"] @@ -5089,7 +4786,6 @@ pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t { let b: f64 = 0.0; simd_le(a, transmute(b)) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"] #[doc = "## Safety"] @@ -5103,7 +4799,6 @@ pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_le(a, transmute(b)) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"] #[doc = "## Safety"] @@ -5119,7 +4814,6 @@ pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] #[doc = "## Safety"] @@ -5133,7 +4827,6 @@ pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t { let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] #[doc = "## Safety"] @@ -5149,7 +4842,6 @@ pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] #[doc = "## Safety"] @@ -5163,7 +4855,6 @@ pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t { let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] #[doc = "## Safety"] @@ -5183,7 +4874,6 @@ pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] #[doc = "## Safety"] @@ -5197,7 +4887,6 @@ pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t { let b: i16x4 = i16x4::new(0, 0, 0, 0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] #[doc = "## Safety"] @@ -5213,7 +4902,6 @@ pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] #[doc = "## Safety"] @@ -5227,7 +4915,6 @@ pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t { let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] #[doc = "## Safety"] @@ -5243,7 +4930,6 @@ pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] #[doc = "## Safety"] @@ -5257,7 +4943,6 @@ pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t { let b: i32x2 = i32x2::new(0, 0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] #[doc = "## Safety"] @@ -5273,7 +4958,6 @@ pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] #[doc = "## Safety"] @@ -5287,7 +4971,6 @@ pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t { let b: i32x4 = i32x4::new(0, 0, 0, 0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] #[doc = "## Safety"] @@ -5303,7 +4986,6 @@ pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"] #[doc = "## Safety"] @@ -5316,7 +4998,6 @@ pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t { let b: i64x1 = i64x1::new(0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"] #[doc = "## Safety"] @@ -5330,7 +5011,6 @@ pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_le(a, transmute(b)) } - #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"] #[doc = "## Safety"] @@ -5346,7 +5026,6 @@ pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_le(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"] #[doc = "## Safety"] @@ -5358,7 +5037,6 @@ pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { pub unsafe fn vclezd_f64(a: f64) -> u64 { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) } - #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"] #[doc = "## Safety"] @@ -5370,7 +5048,6 @@ pub unsafe fn vclezd_f64(a: f64) -> u64 { pub unsafe fn vclezs_f32(a: f32) -> u32 { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) } - #[doc = "Compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"] #[doc = "## Safety"] @@ -5382,7 +5059,6 @@ pub unsafe fn vclezs_f32(a: f32) -> u32 { pub unsafe fn vclezd_s64(a: i64) -> u64 { transmute(vclez_s64(transmute(a))) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"] #[doc = "## Safety"] @@ -5394,7 +5070,6 @@ pub unsafe fn vclezd_s64(a: i64) -> u64 { pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { simd_lt(a, b) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"] #[doc = "## Safety"] @@ -5407,7 +5082,6 @@ pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_lt(a, b) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"] #[doc = "## Safety"] @@ -5423,7 +5097,6 @@ pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"] #[doc = "## Safety"] @@ -5435,7 +5108,6 @@ pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"] #[doc = "## Safety"] @@ -5448,7 +5120,6 @@ pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"] #[doc = "## Safety"] @@ -5464,7 +5135,6 @@ pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"] #[doc = "## Safety"] @@ -5476,7 +5146,6 @@ pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"] #[doc = "## Safety"] @@ -5489,7 +5158,6 @@ pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"] #[doc = "## Safety"] @@ -5505,7 +5173,6 @@ pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"] #[doc = "## Safety"] @@ -5517,7 +5184,6 @@ pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 { transmute(vclt_u64(transmute(a), transmute(b))) } - #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"] #[doc = "## Safety"] @@ -5529,7 +5195,6 @@ pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 { pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 { transmute(vclt_s64(transmute(a), transmute(b))) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"] #[doc = "## Safety"] @@ -5541,7 +5206,6 @@ pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 { pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"] #[doc = "## Safety"] @@ -5553,7 +5217,6 @@ pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 { pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"] #[doc = "## Safety"] @@ -5567,7 +5230,6 @@ pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { let b: f32x2 = f32x2::new(0.0, 0.0); simd_lt(a, transmute(b)) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"] #[doc = "## Safety"] @@ -5583,7 +5245,6 @@ pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] #[doc = "## Safety"] @@ -5597,7 +5258,6 @@ pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); simd_lt(a, transmute(b)) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] #[doc = "## Safety"] @@ -5613,7 +5273,6 @@ pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"] #[doc = "## Safety"] @@ -5626,7 +5285,6 @@ pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t { let b: f64 = 0.0; simd_lt(a, transmute(b)) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"] #[doc = "## Safety"] @@ -5640,7 +5298,6 @@ pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_lt(a, transmute(b)) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"] #[doc = "## Safety"] @@ -5656,7 +5313,6 @@ pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] #[doc = "## Safety"] @@ -5670,7 +5326,6 @@ pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t { let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] #[doc = "## Safety"] @@ -5686,7 +5341,6 @@ pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] #[doc = "## Safety"] @@ -5700,7 +5354,6 @@ pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] #[doc = "## Safety"] @@ -5720,7 +5373,6 @@ pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] #[doc = "## Safety"] @@ -5734,7 +5386,6 @@ pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { let b: i16x4 = i16x4::new(0, 0, 0, 0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] #[doc = "## Safety"] @@ -5750,7 +5401,6 @@ pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] #[doc = "## Safety"] @@ -5764,7 +5414,6 @@ pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] #[doc = "## Safety"] @@ -5780,7 +5429,6 @@ pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] #[doc = "## Safety"] @@ -5794,7 +5442,6 @@ pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { let b: i32x2 = i32x2::new(0, 0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] #[doc = "## Safety"] @@ -5810,7 +5457,6 @@ pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] #[doc = "## Safety"] @@ -5824,7 +5470,6 @@ pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { let b: i32x4 = i32x4::new(0, 0, 0, 0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] #[doc = "## Safety"] @@ -5840,7 +5485,6 @@ pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"] #[doc = "## Safety"] @@ -5853,7 +5497,6 @@ pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t { let b: i64x1 = i64x1::new(0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"] #[doc = "## Safety"] @@ -5867,7 +5510,6 @@ pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_lt(a, transmute(b)) } - #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"] #[doc = "## Safety"] @@ -5883,7 +5525,6 @@ pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_lt(a, transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"] #[doc = "## Safety"] @@ -5895,7 +5536,6 @@ pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { pub unsafe fn vcltzd_f64(a: f64) -> u64 { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) } - #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"] #[doc = "## Safety"] @@ -5907,7 +5547,6 @@ pub unsafe fn vcltzd_f64(a: f64) -> u64 { pub unsafe fn vcltzs_f32(a: f32) -> u32 { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) } - #[doc = "Compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"] #[doc = "## Safety"] @@ -5919,7 +5558,6 @@ pub unsafe fn vcltzs_f32(a: f32) -> u32 { pub unsafe fn vcltzd_s64(a: i64) -> u64 { transmute(vcltz_s64(transmute(a))) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] #[doc = "## Safety"] @@ -5930,7 +5568,7 @@ pub unsafe fn vcltzd_s64(a: i64) -> u64 { #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" @@ -5939,7 +5577,6 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float } _vcmla_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] #[doc = "## Safety"] @@ -5950,7 +5587,7 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" @@ -5963,7 +5600,6 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float let ret_val: float32x2_t = _vcmla_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] #[doc = "## Safety"] @@ -5974,7 +5610,7 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" @@ -5983,7 +5619,6 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa } _vcmlaq_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] #[doc = "## Safety"] @@ -5994,7 +5629,7 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" @@ -6007,7 +5642,6 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa let ret_val: float32x4_t = _vcmlaq_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] #[doc = "## Safety"] @@ -6018,7 +5652,7 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" @@ -6027,7 +5661,6 @@ pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa } _vcmlaq_f64(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] #[doc = "## Safety"] @@ -6038,7 +5671,7 @@ pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" @@ -6051,7 +5684,6 @@ pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa let ret_val: float64x2_t = _vcmlaq_f64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] #[doc = "## Safety"] @@ -6071,7 +5703,6 @@ pub unsafe fn vcmla_lane_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] #[doc = "## Safety"] @@ -6095,7 +5726,6 @@ pub unsafe fn vcmla_lane_f32( let ret_val: float32x2_t = vcmla_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] #[doc = "## Safety"] @@ -6124,7 +5754,6 @@ pub unsafe fn vcmlaq_lane_f32( ); vcmlaq_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] #[doc = "## Safety"] @@ -6157,7 +5786,6 @@ pub unsafe fn vcmlaq_lane_f32( let ret_val: float32x4_t = vcmlaq_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] #[doc = "## Safety"] @@ -6177,7 +5805,6 @@ pub unsafe fn vcmla_laneq_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] #[doc = "## Safety"] @@ -6201,7 +5828,6 @@ pub unsafe fn vcmla_laneq_f32( let ret_val: float32x2_t = vcmla_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] #[doc = "## Safety"] @@ -6230,7 +5856,6 @@ pub unsafe fn vcmlaq_laneq_f32( ); vcmlaq_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] #[doc = "## Safety"] @@ -6263,7 +5888,6 @@ pub unsafe fn vcmlaq_laneq_f32( let ret_val: float32x4_t = vcmlaq_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] #[doc = "## Safety"] @@ -6274,7 +5898,7 @@ pub unsafe fn vcmlaq_laneq_f32( #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" @@ -6283,7 +5907,6 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - } _vcmla_rot180_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] #[doc = "## Safety"] @@ -6294,7 +5917,7 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" @@ -6307,7 +5930,6 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - let ret_val: float32x2_t = _vcmla_rot180_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] #[doc = "## Safety"] @@ -6318,7 +5940,7 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" @@ -6327,7 +5949,6 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) } _vcmlaq_rot180_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] #[doc = "## Safety"] @@ -6338,7 +5959,7 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" @@ -6351,7 +5972,6 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) let ret_val: float32x4_t = _vcmlaq_rot180_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] #[doc = "## Safety"] @@ -6362,7 +5982,7 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" @@ -6371,7 +5991,6 @@ pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) } _vcmlaq_rot180_f64(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] #[doc = "## Safety"] @@ -6382,7 +6001,7 @@ pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" @@ -6395,7 +6014,6 @@ pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) let ret_val: float64x2_t = _vcmlaq_rot180_f64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] #[doc = "## Safety"] @@ -6415,7 +6033,6 @@ pub unsafe fn vcmla_rot180_lane_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot180_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] #[doc = "## Safety"] @@ -6439,7 +6056,6 @@ pub unsafe fn vcmla_rot180_lane_f32( let ret_val: float32x2_t = vcmla_rot180_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] #[doc = "## Safety"] @@ -6468,7 +6084,6 @@ pub unsafe fn vcmlaq_rot180_lane_f32( ); vcmlaq_rot180_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] #[doc = "## Safety"] @@ -6501,7 +6116,6 @@ pub unsafe fn vcmlaq_rot180_lane_f32( let ret_val: float32x4_t = vcmlaq_rot180_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] #[doc = "## Safety"] @@ -6521,7 +6135,6 @@ pub unsafe fn vcmla_rot180_laneq_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot180_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] #[doc = "## Safety"] @@ -6545,7 +6158,6 @@ pub unsafe fn vcmla_rot180_laneq_f32( let ret_val: float32x2_t = vcmla_rot180_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] #[doc = "## Safety"] @@ -6574,7 +6186,6 @@ pub unsafe fn vcmlaq_rot180_laneq_f32( ); vcmlaq_rot180_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] #[doc = "## Safety"] @@ -6607,7 +6218,6 @@ pub unsafe fn vcmlaq_rot180_laneq_f32( let ret_val: float32x4_t = vcmlaq_rot180_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] #[doc = "## Safety"] @@ -6618,7 +6228,7 @@ pub unsafe fn vcmlaq_rot180_laneq_f32( #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" @@ -6627,7 +6237,6 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - } _vcmla_rot270_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] #[doc = "## Safety"] @@ -6638,7 +6247,7 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" @@ -6651,7 +6260,6 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - let ret_val: float32x2_t = _vcmla_rot270_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] #[doc = "## Safety"] @@ -6662,7 +6270,7 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" @@ -6671,7 +6279,6 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) } _vcmlaq_rot270_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] #[doc = "## Safety"] @@ -6682,7 +6289,7 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" @@ -6695,7 +6302,6 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) let ret_val: float32x4_t = _vcmlaq_rot270_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] #[doc = "## Safety"] @@ -6706,7 +6312,7 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" @@ -6715,7 +6321,6 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) } _vcmlaq_rot270_f64(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] #[doc = "## Safety"] @@ -6726,7 +6331,7 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" @@ -6739,7 +6344,6 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) let ret_val: float64x2_t = _vcmlaq_rot270_f64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] #[doc = "## Safety"] @@ -6759,7 +6363,6 @@ pub unsafe fn vcmla_rot270_lane_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot270_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] #[doc = "## Safety"] @@ -6783,7 +6386,6 @@ pub unsafe fn vcmla_rot270_lane_f32( let ret_val: float32x2_t = vcmla_rot270_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] #[doc = "## Safety"] @@ -6812,7 +6414,6 @@ pub unsafe fn vcmlaq_rot270_lane_f32( ); vcmlaq_rot270_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] #[doc = "## Safety"] @@ -6845,7 +6446,6 @@ pub unsafe fn vcmlaq_rot270_lane_f32( let ret_val: float32x4_t = vcmlaq_rot270_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] #[doc = "## Safety"] @@ -6865,7 +6465,6 @@ pub unsafe fn vcmla_rot270_laneq_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot270_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] #[doc = "## Safety"] @@ -6889,7 +6488,6 @@ pub unsafe fn vcmla_rot270_laneq_f32( let ret_val: float32x2_t = vcmla_rot270_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] #[doc = "## Safety"] @@ -6918,7 +6516,6 @@ pub unsafe fn vcmlaq_rot270_laneq_f32( ); vcmlaq_rot270_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] #[doc = "## Safety"] @@ -6951,7 +6548,6 @@ pub unsafe fn vcmlaq_rot270_laneq_f32( let ret_val: float32x4_t = vcmlaq_rot270_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] #[doc = "## Safety"] @@ -6962,7 +6558,7 @@ pub unsafe fn vcmlaq_rot270_laneq_f32( #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32" @@ -6971,7 +6567,6 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> } _vcmla_rot90_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] #[doc = "## Safety"] @@ -6982,7 +6577,7 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32" @@ -6995,7 +6590,6 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> let ret_val: float32x2_t = _vcmla_rot90_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] #[doc = "## Safety"] @@ -7006,7 +6600,7 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32" @@ -7015,7 +6609,6 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) - } _vcmlaq_rot90_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] #[doc = "## Safety"] @@ -7026,7 +6619,7 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32" @@ -7039,7 +6632,6 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) - let ret_val: float32x4_t = _vcmlaq_rot90_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] #[doc = "## Safety"] @@ -7050,7 +6642,7 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64" @@ -7059,7 +6651,6 @@ pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) - } _vcmlaq_rot90_f64(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] #[doc = "## Safety"] @@ -7070,7 +6661,7 @@ pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) - #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64" @@ -7083,7 +6674,6 @@ pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) - let ret_val: float64x2_t = _vcmlaq_rot90_f64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] #[doc = "## Safety"] @@ -7103,7 +6693,6 @@ pub unsafe fn vcmla_rot90_lane_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot90_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] #[doc = "## Safety"] @@ -7127,7 +6716,6 @@ pub unsafe fn vcmla_rot90_lane_f32( let ret_val: float32x2_t = vcmla_rot90_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] #[doc = "## Safety"] @@ -7156,7 +6744,6 @@ pub unsafe fn vcmlaq_rot90_lane_f32( ); vcmlaq_rot90_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] #[doc = "## Safety"] @@ -7189,7 +6776,6 @@ pub unsafe fn vcmlaq_rot90_lane_f32( let ret_val: float32x4_t = vcmlaq_rot90_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] #[doc = "## Safety"] @@ -7209,7 +6795,6 @@ pub unsafe fn vcmla_rot90_laneq_f32( let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot90_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] #[doc = "## Safety"] @@ -7233,7 +6818,6 @@ pub unsafe fn vcmla_rot90_laneq_f32( let ret_val: float32x2_t = vcmla_rot90_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] #[doc = "## Safety"] @@ -7262,7 +6846,6 @@ pub unsafe fn vcmlaq_rot90_laneq_f32( ); vcmlaq_rot90_f32(a, b, c) } - #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] #[doc = "## Safety"] @@ -7295,7 +6878,6 @@ pub unsafe fn vcmlaq_rot90_laneq_f32( let ret_val: float32x4_t = vcmlaq_rot90_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] #[doc = "## Safety"] @@ -7318,7 +6900,6 @@ pub unsafe fn vcopy_lane_f32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] #[doc = "## Safety"] @@ -7344,7 +6925,6 @@ pub unsafe fn vcopy_lane_f32( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] #[doc = "## Safety"] @@ -7373,7 +6953,6 @@ pub unsafe fn vcopy_lane_s8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] #[doc = "## Safety"] @@ -7405,7 +6984,6 @@ pub unsafe fn vcopy_lane_s8( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] #[doc = "## Safety"] @@ -7430,7 +7008,6 @@ pub unsafe fn vcopy_lane_s16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] #[doc = "## Safety"] @@ -7458,7 +7035,6 @@ pub unsafe fn vcopy_lane_s16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] #[doc = "## Safety"] @@ -7481,7 +7057,6 @@ pub unsafe fn vcopy_lane_s32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] #[doc = "## Safety"] @@ -7507,7 +7082,6 @@ pub unsafe fn vcopy_lane_s32( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] #[doc = "## Safety"] @@ -7536,7 +7110,6 @@ pub unsafe fn vcopy_lane_u8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] #[doc = "## Safety"] @@ -7568,7 +7141,6 @@ pub unsafe fn vcopy_lane_u8( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] #[doc = "## Safety"] @@ -7593,7 +7165,6 @@ pub unsafe fn vcopy_lane_u16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] #[doc = "## Safety"] @@ -7621,7 +7192,6 @@ pub unsafe fn vcopy_lane_u16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] #[doc = "## Safety"] @@ -7644,7 +7214,6 @@ pub unsafe fn vcopy_lane_u32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] #[doc = "## Safety"] @@ -7670,7 +7239,6 @@ pub unsafe fn vcopy_lane_u32( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] #[doc = "## Safety"] @@ -7699,7 +7267,6 @@ pub unsafe fn vcopy_lane_p8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] #[doc = "## Safety"] @@ -7731,7 +7298,6 @@ pub unsafe fn vcopy_lane_p8( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] #[doc = "## Safety"] @@ -7756,7 +7322,6 @@ pub unsafe fn vcopy_lane_p16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] #[doc = "## Safety"] @@ -7784,7 +7349,6 @@ pub unsafe fn vcopy_lane_p16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] #[doc = "## Safety"] @@ -7808,7 +7372,6 @@ pub unsafe fn vcopy_laneq_f32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] #[doc = "## Safety"] @@ -7835,7 +7398,6 @@ pub unsafe fn vcopy_laneq_f32( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] #[doc = "## Safety"] @@ -7865,7 +7427,6 @@ pub unsafe fn vcopy_laneq_s8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] #[doc = "## Safety"] @@ -7898,7 +7459,6 @@ pub unsafe fn vcopy_laneq_s8( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] #[doc = "## Safety"] @@ -7924,7 +7484,6 @@ pub unsafe fn vcopy_laneq_s16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] #[doc = "## Safety"] @@ -7953,7 +7512,6 @@ pub unsafe fn vcopy_laneq_s16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] #[doc = "## Safety"] @@ -7977,7 +7535,6 @@ pub unsafe fn vcopy_laneq_s32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] #[doc = "## Safety"] @@ -8004,7 +7561,6 @@ pub unsafe fn vcopy_laneq_s32( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] #[doc = "## Safety"] @@ -8034,7 +7590,6 @@ pub unsafe fn vcopy_laneq_u8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] #[doc = "## Safety"] @@ -8067,7 +7622,6 @@ pub unsafe fn vcopy_laneq_u8( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] #[doc = "## Safety"] @@ -8093,7 +7647,6 @@ pub unsafe fn vcopy_laneq_u16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] #[doc = "## Safety"] @@ -8122,7 +7675,6 @@ pub unsafe fn vcopy_laneq_u16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] #[doc = "## Safety"] @@ -8146,7 +7698,6 @@ pub unsafe fn vcopy_laneq_u32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] #[doc = "## Safety"] @@ -8173,7 +7724,6 @@ pub unsafe fn vcopy_laneq_u32( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] #[doc = "## Safety"] @@ -8203,7 +7753,6 @@ pub unsafe fn vcopy_laneq_p8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] #[doc = "## Safety"] @@ -8236,7 +7785,6 @@ pub unsafe fn vcopy_laneq_p8( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] #[doc = "## Safety"] @@ -8262,7 +7810,6 @@ pub unsafe fn vcopy_laneq_p16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] #[doc = "## Safety"] @@ -8291,7 +7838,6 @@ pub unsafe fn vcopy_laneq_p16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] #[doc = "## Safety"] @@ -8317,7 +7863,6 @@ pub unsafe fn vcopyq_lane_f32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] #[doc = "## Safety"] @@ -8346,7 +7891,6 @@ pub unsafe fn vcopyq_lane_f32( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] #[doc = "## Safety"] @@ -8370,7 +7914,6 @@ pub unsafe fn vcopyq_lane_f64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] #[doc = "## Safety"] @@ -8396,7 +7939,6 @@ pub unsafe fn vcopyq_lane_f64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] #[doc = "## Safety"] @@ -8420,7 +7962,6 @@ pub unsafe fn vcopyq_lane_s64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] #[doc = "## Safety"] @@ -8446,7 +7987,6 @@ pub unsafe fn vcopyq_lane_s64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] #[doc = "## Safety"] @@ -8470,7 +8010,6 @@ pub unsafe fn vcopyq_lane_u64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] #[doc = "## Safety"] @@ -8496,7 +8035,6 @@ pub unsafe fn vcopyq_lane_u64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] #[doc = "## Safety"] @@ -8520,7 +8058,6 @@ pub unsafe fn vcopyq_lane_p64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] #[doc = "## Safety"] @@ -8546,7 +8083,6 @@ pub unsafe fn vcopyq_lane_p64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] #[doc = "## Safety"] @@ -8920,7 +8456,6 @@ pub unsafe fn vcopyq_lane_s8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] #[doc = "## Safety"] @@ -9301,7 +8836,6 @@ pub unsafe fn vcopyq_lane_s8( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] #[doc = "## Safety"] @@ -9331,7 +8865,6 @@ pub unsafe fn vcopyq_lane_s16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] #[doc = "## Safety"] @@ -9364,7 +8897,6 @@ pub unsafe fn vcopyq_lane_s16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] #[doc = "## Safety"] @@ -9390,7 +8922,6 @@ pub unsafe fn vcopyq_lane_s32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] #[doc = "## Safety"] @@ -9419,7 +8950,6 @@ pub unsafe fn vcopyq_lane_s32( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] #[doc = "## Safety"] @@ -9793,7 +9323,6 @@ pub unsafe fn vcopyq_lane_u8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] #[doc = "## Safety"] @@ -10174,7 +9703,6 @@ pub unsafe fn vcopyq_lane_u8( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] #[doc = "## Safety"] @@ -10204,7 +9732,6 @@ pub unsafe fn vcopyq_lane_u16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] #[doc = "## Safety"] @@ -10237,7 +9764,6 @@ pub unsafe fn vcopyq_lane_u16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] #[doc = "## Safety"] @@ -10263,7 +9789,6 @@ pub unsafe fn vcopyq_lane_u32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] #[doc = "## Safety"] @@ -10292,7 +9817,6 @@ pub unsafe fn vcopyq_lane_u32( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] #[doc = "## Safety"] @@ -10666,7 +10190,6 @@ pub unsafe fn vcopyq_lane_p8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] #[doc = "## Safety"] @@ -11047,7 +10570,6 @@ pub unsafe fn vcopyq_lane_p8( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] #[doc = "## Safety"] @@ -11077,7 +10599,6 @@ pub unsafe fn vcopyq_lane_p16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] #[doc = "## Safety"] @@ -11110,7 +10631,6 @@ pub unsafe fn vcopyq_lane_p16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] #[doc = "## Safety"] @@ -11135,7 +10655,6 @@ pub unsafe fn vcopyq_laneq_f32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] #[doc = "## Safety"] @@ -11163,7 +10682,6 @@ pub unsafe fn vcopyq_laneq_f32( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] #[doc = "## Safety"] @@ -11186,7 +10704,6 @@ pub unsafe fn vcopyq_laneq_f64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] #[doc = "## Safety"] @@ -11212,7 +10729,6 @@ pub unsafe fn vcopyq_laneq_f64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] #[doc = "## Safety"] @@ -11585,7 +11101,6 @@ pub unsafe fn vcopyq_laneq_s8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] #[doc = "## Safety"] @@ -11965,7 +11480,6 @@ pub unsafe fn vcopyq_laneq_s8( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] #[doc = "## Safety"] @@ -11994,7 +11508,6 @@ pub unsafe fn vcopyq_laneq_s16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] #[doc = "## Safety"] @@ -12026,7 +11539,6 @@ pub unsafe fn vcopyq_laneq_s16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] #[doc = "## Safety"] @@ -12051,7 +11563,6 @@ pub unsafe fn vcopyq_laneq_s32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] #[doc = "## Safety"] @@ -12079,7 +11590,6 @@ pub unsafe fn vcopyq_laneq_s32( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] #[doc = "## Safety"] @@ -12102,7 +11612,6 @@ pub unsafe fn vcopyq_laneq_s64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] #[doc = "## Safety"] @@ -12128,7 +11637,6 @@ pub unsafe fn vcopyq_laneq_s64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] #[doc = "## Safety"] @@ -12501,7 +12009,6 @@ pub unsafe fn vcopyq_laneq_u8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] #[doc = "## Safety"] @@ -12881,7 +12388,6 @@ pub unsafe fn vcopyq_laneq_u8( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] #[doc = "## Safety"] @@ -12910,7 +12416,6 @@ pub unsafe fn vcopyq_laneq_u16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] #[doc = "## Safety"] @@ -12942,7 +12447,6 @@ pub unsafe fn vcopyq_laneq_u16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] #[doc = "## Safety"] @@ -12967,7 +12471,6 @@ pub unsafe fn vcopyq_laneq_u32( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] #[doc = "## Safety"] @@ -12995,7 +12498,6 @@ pub unsafe fn vcopyq_laneq_u32( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] #[doc = "## Safety"] @@ -13018,7 +12520,6 @@ pub unsafe fn vcopyq_laneq_u64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] #[doc = "## Safety"] @@ -13044,7 +12545,6 @@ pub unsafe fn vcopyq_laneq_u64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] #[doc = "## Safety"] @@ -13417,7 +12917,6 @@ pub unsafe fn vcopyq_laneq_p8( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] #[doc = "## Safety"] @@ -13797,7 +13296,6 @@ pub unsafe fn vcopyq_laneq_p8( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] #[doc = "## Safety"] @@ -13826,7 +13324,6 @@ pub unsafe fn vcopyq_laneq_p16( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] #[doc = "## Safety"] @@ -13858,7 +13355,6 @@ pub unsafe fn vcopyq_laneq_p16( }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] #[doc = "## Safety"] @@ -13881,7 +13377,6 @@ pub unsafe fn vcopyq_laneq_p64( _ => unreachable_unchecked(), } } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] #[doc = "## Safety"] @@ -13907,7 +13402,6 @@ pub unsafe fn vcopyq_laneq_p64( }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"] #[doc = "## Safety"] @@ -13919,7 +13413,6 @@ pub unsafe fn vcopyq_laneq_p64( pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { transmute(a) } - #[doc = "Floating-point convert to lower precision narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] #[doc = "## Safety"] @@ -13932,7 +13425,6 @@ pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { simd_cast(a) } - #[doc = "Floating-point convert to lower precision narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] #[doc = "## Safety"] @@ -13947,7 +13439,6 @@ pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to higher precision long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] #[doc = "## Safety"] @@ -13960,7 +13451,6 @@ pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { simd_cast(a) } - #[doc = "Floating-point convert to higher precision long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] #[doc = "## Safety"] @@ -13975,7 +13465,6 @@ pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"] #[doc = "## Safety"] @@ -13987,7 +13476,6 @@ pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] #[doc = "## Safety"] @@ -14000,7 +13488,6 @@ pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] #[doc = "## Safety"] @@ -14015,7 +13502,6 @@ pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"] #[doc = "## Safety"] @@ -14027,7 +13513,6 @@ pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] #[doc = "## Safety"] @@ -14040,7 +13525,6 @@ pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] #[doc = "## Safety"] @@ -14055,7 +13539,6 @@ pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to lower precision narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] #[doc = "## Safety"] @@ -14068,7 +13551,6 @@ pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) } - #[doc = "Floating-point convert to lower precision narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] #[doc = "## Safety"] @@ -14084,7 +13566,6 @@ pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to higher precision long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] #[doc = "## Safety"] @@ -14098,7 +13579,6 @@ pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { let b: float32x2_t = simd_shuffle!(a, a, [2, 3]); simd_cast(b) } - #[doc = "Floating-point convert to higher precision long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] #[doc = "## Safety"] @@ -14114,7 +13594,6 @@ pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { let ret_val: float64x2_t = simd_cast(b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"] #[doc = "## Safety"] @@ -14126,7 +13605,7 @@ pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64" @@ -14135,7 +13614,6 @@ pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { } _vcvt_n_f64_s64(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] #[doc = "## Safety"] @@ -14148,7 +13626,7 @@ pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" @@ -14157,7 +13635,6 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { } _vcvtq_n_f64_s64(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] #[doc = "## Safety"] @@ -14170,7 +13647,7 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" @@ -14181,7 +13658,6 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vcvtq_n_f64_s64(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"] #[doc = "## Safety"] @@ -14193,7 +13669,7 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64" @@ -14202,7 +13678,6 @@ pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { } _vcvt_n_f64_u64(a.as_signed(), N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] #[doc = "## Safety"] @@ -14215,7 +13690,7 @@ pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" @@ -14224,7 +13699,6 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { } _vcvtq_n_f64_u64(a.as_signed(), N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] #[doc = "## Safety"] @@ -14237,7 +13711,7 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" @@ -14248,7 +13722,6 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vcvtq_n_f64_u64(a.as_signed(), N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"] #[doc = "## Safety"] @@ -14260,7 +13733,7 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64" @@ -14269,7 +13742,6 @@ pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { } _vcvt_n_s64_f64(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] #[doc = "## Safety"] @@ -14282,7 +13754,7 @@ pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" @@ -14291,7 +13763,6 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { } _vcvtq_n_s64_f64(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] #[doc = "## Safety"] @@ -14304,7 +13775,7 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" @@ -14315,7 +13786,6 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vcvtq_n_s64_f64(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"] #[doc = "## Safety"] @@ -14327,7 +13797,7 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64" @@ -14336,7 +13806,6 @@ pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { } _vcvt_n_u64_f64(a, N).as_unsigned() } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] #[doc = "## Safety"] @@ -14349,7 +13818,7 @@ pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" @@ -14358,7 +13827,6 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { } _vcvtq_n_u64_f64(a, N).as_unsigned() } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] #[doc = "## Safety"] @@ -14371,7 +13839,7 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" @@ -14382,7 +13850,6 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcvtq_n_u64_f64(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"] #[doc = "## Safety"] @@ -14392,7 +13859,7 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptosi.sat.v1i64.v1f64" @@ -14401,7 +13868,6 @@ pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { } _vcvt_s64_f64(a) } - #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] #[doc = "## Safety"] @@ -14412,7 +13878,7 @@ pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptosi.sat.v2i64.v2f64" @@ -14421,7 +13887,6 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { } _vcvtq_s64_f64(a) } - #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] #[doc = "## Safety"] @@ -14432,7 +13897,7 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptosi.sat.v2i64.v2f64" @@ -14443,7 +13908,6 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vcvtq_s64_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"] #[doc = "## Safety"] @@ -14453,7 +13917,7 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v1i64.v1f64" @@ -14462,7 +13926,6 @@ pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { } _vcvt_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] #[doc = "## Safety"] @@ -14473,7 +13936,7 @@ pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v2i64.v2f64" @@ -14482,7 +13945,6 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { } _vcvtq_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] #[doc = "## Safety"] @@ -14493,7 +13955,7 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v2i64.v2f64" @@ -14504,7 +13966,6 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcvtq_u64_f64(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] #[doc = "## Safety"] @@ -14515,7 +13976,7 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" @@ -14524,7 +13985,6 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { } _vcvta_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] #[doc = "## Safety"] @@ -14535,7 +13995,7 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" @@ -14546,7 +14006,6 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcvta_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] #[doc = "## Safety"] @@ -14557,7 +14016,7 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" @@ -14566,7 +14025,6 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { } _vcvtaq_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] #[doc = "## Safety"] @@ -14577,7 +14035,7 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" @@ -14588,7 +14046,6 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vcvtaq_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"] #[doc = "## Safety"] @@ -14598,7 +14055,7 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64" @@ -14607,7 +14064,6 @@ pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { } _vcvta_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] #[doc = "## Safety"] @@ -14618,7 +14074,7 @@ pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" @@ -14627,7 +14083,6 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { } _vcvtaq_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] #[doc = "## Safety"] @@ -14638,7 +14093,7 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" @@ -14649,7 +14104,6 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vcvtaq_s64_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] #[doc = "## Safety"] @@ -14660,7 +14114,7 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" @@ -14669,7 +14123,6 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { } _vcvta_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] #[doc = "## Safety"] @@ -14680,7 +14133,7 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" @@ -14691,7 +14144,6 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcvta_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] #[doc = "## Safety"] @@ -14702,7 +14154,7 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" @@ -14711,7 +14163,6 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { } _vcvtaq_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] #[doc = "## Safety"] @@ -14722,7 +14173,7 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" @@ -14733,7 +14184,6 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcvtaq_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"] #[doc = "## Safety"] @@ -14743,7 +14193,7 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64" @@ -14752,7 +14202,6 @@ pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { } _vcvta_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] #[doc = "## Safety"] @@ -14763,7 +14212,7 @@ pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" @@ -14772,7 +14221,6 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { } _vcvtaq_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] #[doc = "## Safety"] @@ -14783,7 +14231,7 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" @@ -14794,7 +14242,6 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcvtaq_u64_f64(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"] #[doc = "## Safety"] @@ -14804,7 +14251,7 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.i32.f32" @@ -14813,7 +14260,6 @@ pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { } _vcvtas_s32_f32(a) } - #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"] #[doc = "## Safety"] @@ -14823,7 +14269,7 @@ pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.i64.f64" @@ -14832,7 +14278,6 @@ pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { } _vcvtad_s64_f64(a) } - #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"] #[doc = "## Safety"] @@ -14842,7 +14287,7 @@ pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i32.f32" @@ -14851,7 +14296,6 @@ pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { } _vcvtas_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"] #[doc = "## Safety"] @@ -14861,7 +14305,7 @@ pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i64.f64" @@ -14870,7 +14314,6 @@ pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { } _vcvtad_u64_f64(a).as_unsigned() } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"] #[doc = "## Safety"] @@ -14882,7 +14325,6 @@ pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { a as f64 } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"] #[doc = "## Safety"] @@ -14894,7 +14336,6 @@ pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { a as f32 } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] #[doc = "## Safety"] @@ -14905,7 +14346,7 @@ pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" @@ -14914,7 +14355,6 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { } _vcvtm_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] #[doc = "## Safety"] @@ -14925,7 +14365,7 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" @@ -14936,7 +14376,6 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcvtm_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] #[doc = "## Safety"] @@ -14947,7 +14386,7 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" @@ -14956,7 +14395,6 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { } _vcvtmq_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] #[doc = "## Safety"] @@ -14967,7 +14405,7 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" @@ -14978,7 +14416,6 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vcvtmq_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"] #[doc = "## Safety"] @@ -14988,7 +14425,7 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64" @@ -14997,7 +14434,6 @@ pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { } _vcvtm_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] #[doc = "## Safety"] @@ -15008,7 +14444,7 @@ pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" @@ -15017,7 +14453,6 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { } _vcvtmq_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] #[doc = "## Safety"] @@ -15028,7 +14463,7 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" @@ -15039,7 +14474,6 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vcvtmq_s64_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] #[doc = "## Safety"] @@ -15050,7 +14484,7 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" @@ -15059,7 +14493,6 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { } _vcvtm_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] #[doc = "## Safety"] @@ -15070,7 +14503,7 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" @@ -15081,7 +14514,6 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcvtm_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] #[doc = "## Safety"] @@ -15092,7 +14524,7 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" @@ -15101,7 +14533,6 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { } _vcvtmq_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] #[doc = "## Safety"] @@ -15112,7 +14543,7 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" @@ -15123,7 +14554,6 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcvtmq_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"] #[doc = "## Safety"] @@ -15133,7 +14563,7 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64" @@ -15142,7 +14572,6 @@ pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { } _vcvtm_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] #[doc = "## Safety"] @@ -15153,7 +14582,7 @@ pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" @@ -15162,7 +14591,6 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { } _vcvtmq_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] #[doc = "## Safety"] @@ -15173,7 +14601,7 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" @@ -15184,7 +14612,6 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcvtmq_u64_f64(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"] #[doc = "## Safety"] @@ -15194,7 +14621,7 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.i32.f32" @@ -15203,7 +14630,6 @@ pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { } _vcvtms_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"] #[doc = "## Safety"] @@ -15213,7 +14639,7 @@ pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.i64.f64" @@ -15222,7 +14648,6 @@ pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { } _vcvtmd_s64_f64(a) } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"] #[doc = "## Safety"] @@ -15232,7 +14657,7 @@ pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i32.f32" @@ -15241,7 +14666,6 @@ pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { } _vcvtms_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"] #[doc = "## Safety"] @@ -15251,7 +14675,7 @@ pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i64.f64" @@ -15260,7 +14684,6 @@ pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { } _vcvtmd_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] #[doc = "## Safety"] @@ -15271,7 +14694,7 @@ pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" @@ -15280,7 +14703,6 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { } _vcvtn_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] #[doc = "## Safety"] @@ -15291,7 +14713,7 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" @@ -15302,7 +14724,6 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcvtn_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] #[doc = "## Safety"] @@ -15313,7 +14734,7 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" @@ -15322,7 +14743,6 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { } _vcvtnq_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] #[doc = "## Safety"] @@ -15333,7 +14753,7 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" @@ -15344,7 +14764,6 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vcvtnq_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"] #[doc = "## Safety"] @@ -15354,7 +14773,7 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64" @@ -15363,7 +14782,6 @@ pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { } _vcvtn_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] #[doc = "## Safety"] @@ -15374,7 +14792,7 @@ pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" @@ -15383,7 +14801,6 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { } _vcvtnq_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] #[doc = "## Safety"] @@ -15394,7 +14811,7 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" @@ -15405,7 +14822,6 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vcvtnq_s64_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] #[doc = "## Safety"] @@ -15416,7 +14832,7 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" @@ -15425,7 +14841,6 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { } _vcvtn_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] #[doc = "## Safety"] @@ -15436,7 +14851,7 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" @@ -15447,7 +14862,6 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcvtn_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] #[doc = "## Safety"] @@ -15458,7 +14872,7 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" @@ -15467,7 +14881,6 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { } _vcvtnq_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] #[doc = "## Safety"] @@ -15478,7 +14891,7 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" @@ -15489,7 +14902,6 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcvtnq_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"] #[doc = "## Safety"] @@ -15499,7 +14911,7 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64" @@ -15508,7 +14920,6 @@ pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { } _vcvtn_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] #[doc = "## Safety"] @@ -15519,7 +14930,7 @@ pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" @@ -15528,7 +14939,6 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { } _vcvtnq_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] #[doc = "## Safety"] @@ -15539,7 +14949,7 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" @@ -15550,7 +14960,6 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcvtnq_u64_f64(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"] #[doc = "## Safety"] @@ -15560,7 +14969,7 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.i32.f32" @@ -15569,7 +14978,6 @@ pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { } _vcvtns_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"] #[doc = "## Safety"] @@ -15579,7 +14987,7 @@ pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.i64.f64" @@ -15588,7 +14996,6 @@ pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { } _vcvtnd_s64_f64(a) } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"] #[doc = "## Safety"] @@ -15598,7 +15005,7 @@ pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i32.f32" @@ -15607,7 +15014,6 @@ pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { } _vcvtns_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"] #[doc = "## Safety"] @@ -15617,7 +15023,7 @@ pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i64.f64" @@ -15626,7 +15032,6 @@ pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { } _vcvtnd_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] #[doc = "## Safety"] @@ -15637,7 +15042,7 @@ pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" @@ -15646,7 +15051,6 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { } _vcvtp_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] #[doc = "## Safety"] @@ -15657,7 +15061,7 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" @@ -15668,7 +15072,6 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcvtp_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] #[doc = "## Safety"] @@ -15679,7 +15082,7 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" @@ -15688,7 +15091,6 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { } _vcvtpq_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] #[doc = "## Safety"] @@ -15699,7 +15101,7 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" @@ -15710,7 +15112,6 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vcvtpq_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"] #[doc = "## Safety"] @@ -15720,7 +15121,7 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64" @@ -15729,7 +15130,6 @@ pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { } _vcvtp_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] #[doc = "## Safety"] @@ -15740,7 +15140,7 @@ pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" @@ -15749,7 +15149,6 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { } _vcvtpq_s64_f64(a) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] #[doc = "## Safety"] @@ -15760,7 +15159,7 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" @@ -15771,7 +15170,6 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vcvtpq_s64_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] #[doc = "## Safety"] @@ -15782,7 +15180,7 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" @@ -15791,7 +15189,6 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { } _vcvtp_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] #[doc = "## Safety"] @@ -15802,7 +15199,7 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" @@ -15813,7 +15210,6 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcvtp_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] #[doc = "## Safety"] @@ -15824,7 +15220,7 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" @@ -15833,7 +15229,6 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { } _vcvtpq_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] #[doc = "## Safety"] @@ -15844,7 +15239,7 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" @@ -15855,7 +15250,6 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcvtpq_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"] #[doc = "## Safety"] @@ -15865,7 +15259,7 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64" @@ -15874,7 +15268,6 @@ pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { } _vcvtp_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] #[doc = "## Safety"] @@ -15885,7 +15278,7 @@ pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" @@ -15894,7 +15287,6 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { } _vcvtpq_u64_f64(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] #[doc = "## Safety"] @@ -15905,7 +15297,7 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" @@ -15916,7 +15308,6 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vcvtpq_u64_f64(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"] #[doc = "## Safety"] @@ -15926,7 +15317,7 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.i32.f32" @@ -15935,7 +15326,6 @@ pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { } _vcvtps_s32_f32(a) } - #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"] #[doc = "## Safety"] @@ -15945,7 +15335,7 @@ pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.i64.f64" @@ -15954,7 +15344,6 @@ pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { } _vcvtpd_s64_f64(a) } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"] #[doc = "## Safety"] @@ -15964,7 +15353,7 @@ pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i32.f32" @@ -15973,7 +15362,6 @@ pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { } _vcvtps_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"] #[doc = "## Safety"] @@ -15983,7 +15371,7 @@ pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i64.f64" @@ -15992,7 +15380,6 @@ pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { } _vcvtpd_u64_f64(a).as_unsigned() } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"] #[doc = "## Safety"] @@ -16004,7 +15391,6 @@ pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { a as f32 } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"] #[doc = "## Safety"] @@ -16016,7 +15402,6 @@ pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { a as f64 } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"] #[doc = "## Safety"] @@ -16028,7 +15413,7 @@ pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32" @@ -16037,7 +15422,6 @@ pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { } _vcvts_n_f32_s32(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"] #[doc = "## Safety"] @@ -16049,7 +15433,7 @@ pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64" @@ -16058,7 +15442,6 @@ pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { } _vcvtd_n_f64_s64(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"] #[doc = "## Safety"] @@ -16070,7 +15453,7 @@ pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32" @@ -16079,7 +15462,6 @@ pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { } _vcvts_n_f32_u32(a.as_signed(), N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"] #[doc = "## Safety"] @@ -16091,7 +15473,7 @@ pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64" @@ -16100,7 +15482,6 @@ pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { } _vcvtd_n_f64_u64(a.as_signed(), N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"] #[doc = "## Safety"] @@ -16112,7 +15493,7 @@ pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32" @@ -16121,7 +15502,6 @@ pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { } _vcvts_n_s32_f32(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"] #[doc = "## Safety"] @@ -16133,7 +15513,7 @@ pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64" @@ -16142,7 +15522,6 @@ pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { } _vcvtd_n_s64_f64(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"] #[doc = "## Safety"] @@ -16154,7 +15533,7 @@ pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32" @@ -16163,7 +15542,6 @@ pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { } _vcvts_n_u32_f32(a, N).as_unsigned() } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"] #[doc = "## Safety"] @@ -16175,7 +15553,7 @@ pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64" @@ -16184,7 +15562,6 @@ pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { } _vcvtd_n_u64_f64(a, N).as_unsigned() } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"] #[doc = "## Safety"] @@ -16196,7 +15573,6 @@ pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { a as i32 } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"] #[doc = "## Safety"] @@ -16208,7 +15584,6 @@ pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { a as i64 } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"] #[doc = "## Safety"] @@ -16220,7 +15595,6 @@ pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { a as u32 } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"] #[doc = "## Safety"] @@ -16232,7 +15606,6 @@ pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { a as u64 } - #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] #[doc = "## Safety"] @@ -16243,7 +15616,7 @@ pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" @@ -16252,7 +15625,6 @@ pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { } _vcvtx_f32_f64(a) } - #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] #[doc = "## Safety"] @@ -16263,7 +15635,7 @@ pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { #[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" @@ -16274,7 +15646,6 @@ pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { let ret_val: float32x2_t = _vcvtx_f32_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] #[doc = "## Safety"] @@ -16287,7 +15658,6 @@ pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) } - #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] #[doc = "## Safety"] @@ -16303,7 +15673,6 @@ pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t let ret_val: float32x4_t = simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"] #[doc = "## Safety"] @@ -16315,7 +15684,6 @@ pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) } - #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] #[doc = "## Safety"] @@ -16328,7 +15696,6 @@ pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_div(a, b) } - #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] #[doc = "## Safety"] @@ -16344,7 +15711,6 @@ pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_div(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] #[doc = "## Safety"] @@ -16357,7 +15723,6 @@ pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_div(a, b) } - #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] #[doc = "## Safety"] @@ -16373,7 +15738,6 @@ pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_div(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"] #[doc = "## Safety"] @@ -16385,7 +15749,6 @@ pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { simd_div(a, b) } - #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] #[doc = "## Safety"] @@ -16398,7 +15761,6 @@ pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_div(a, b) } - #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] #[doc = "## Safety"] @@ -16414,7 +15776,6 @@ pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_div(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] #[doc = "## Safety"] @@ -16435,7 +15796,6 @@ pub unsafe fn vdot_laneq_s32( let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vdot_s32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] #[doc = "## Safety"] @@ -16460,7 +15820,6 @@ pub unsafe fn vdot_laneq_s32( let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] #[doc = "## Safety"] @@ -16481,7 +15840,6 @@ pub unsafe fn vdotq_laneq_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vdotq_s32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] #[doc = "## Safety"] @@ -16506,7 +15864,6 @@ pub unsafe fn vdotq_laneq_s32( let ret_val: int32x4_t = vdotq_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] #[doc = "## Safety"] @@ -16527,7 +15884,6 @@ pub unsafe fn vdot_laneq_u32( let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vdot_u32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] #[doc = "## Safety"] @@ -16552,7 +15908,6 @@ pub unsafe fn vdot_laneq_u32( let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] #[doc = "## Safety"] @@ -16573,7 +15928,6 @@ pub unsafe fn vdotq_laneq_u32( let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vdotq_u32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] #[doc = "## Safety"] @@ -16598,7 +15952,6 @@ pub unsafe fn vdotq_laneq_u32( let ret_val: uint32x4_t = vdotq_u32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"] #[doc = "## Safety"] @@ -16612,7 +15965,6 @@ pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { static_assert!(N == 0); a } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"] #[doc = "## Safety"] @@ -16626,7 +15978,6 @@ pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { static_assert!(N == 0); a } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] #[doc = "## Safety"] @@ -16641,7 +15992,6 @@ pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { static_assert_uimm_bits!(N, 1); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] #[doc = "## Safety"] @@ -16657,7 +16007,6 @@ pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] #[doc = "## Safety"] @@ -16672,7 +16021,6 @@ pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { static_assert_uimm_bits!(N, 1); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] #[doc = "## Safety"] @@ -16688,7 +16036,6 @@ pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] #[doc = "## Safety"] @@ -16703,7 +16050,6 @@ pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { static_assert_uimm_bits!(N, 3); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] #[doc = "## Safety"] @@ -16719,7 +16065,6 @@ pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] #[doc = "## Safety"] @@ -16734,7 +16079,6 @@ pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { static_assert_uimm_bits!(N, 3); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] #[doc = "## Safety"] @@ -16750,7 +16094,6 @@ pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] #[doc = "## Safety"] @@ -16765,7 +16108,6 @@ pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { static_assert_uimm_bits!(N, 3); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] #[doc = "## Safety"] @@ -16781,7 +16123,6 @@ pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] #[doc = "## Safety"] @@ -16796,7 +16137,6 @@ pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { static_assert_uimm_bits!(N, 3); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] #[doc = "## Safety"] @@ -16812,7 +16152,6 @@ pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] #[doc = "## Safety"] @@ -16827,7 +16166,6 @@ pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { static_assert_uimm_bits!(N, 3); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] #[doc = "## Safety"] @@ -16843,7 +16181,6 @@ pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] #[doc = "## Safety"] @@ -16858,7 +16195,6 @@ pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { static_assert_uimm_bits!(N, 3); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] #[doc = "## Safety"] @@ -16874,7 +16210,6 @@ pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] #[doc = "## Safety"] @@ -16889,7 +16224,6 @@ pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { static_assert_uimm_bits!(N, 4); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] #[doc = "## Safety"] @@ -16905,7 +16239,6 @@ pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] #[doc = "## Safety"] @@ -16920,7 +16253,6 @@ pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { static_assert_uimm_bits!(N, 4); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] #[doc = "## Safety"] @@ -16936,7 +16268,6 @@ pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] #[doc = "## Safety"] @@ -16951,7 +16282,6 @@ pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { static_assert_uimm_bits!(N, 4); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] #[doc = "## Safety"] @@ -16967,7 +16297,6 @@ pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"] #[doc = "## Safety"] @@ -16981,7 +16310,6 @@ pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { static_assert!(N == 0); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"] #[doc = "## Safety"] @@ -16995,7 +16323,6 @@ pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { static_assert!(N == 0); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"] #[doc = "## Safety"] @@ -17009,7 +16336,6 @@ pub unsafe fn vdupd_lane_u64(a: uint64x1_t) -> u64 { static_assert!(N == 0); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] #[doc = "## Safety"] @@ -17024,7 +16350,6 @@ pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] #[doc = "## Safety"] @@ -17040,7 +16365,6 @@ pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] #[doc = "## Safety"] @@ -17055,7 +16379,6 @@ pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] #[doc = "## Safety"] @@ -17071,7 +16394,6 @@ pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] #[doc = "## Safety"] @@ -17086,7 +16408,6 @@ pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] #[doc = "## Safety"] @@ -17103,7 +16424,6 @@ pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] #[doc = "## Safety"] @@ -17118,7 +16438,6 @@ pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] #[doc = "## Safety"] @@ -17135,7 +16454,6 @@ pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] #[doc = "## Safety"] @@ -17150,7 +16468,6 @@ pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { static_assert_uimm_bits!(N, 1); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] #[doc = "## Safety"] @@ -17166,7 +16483,6 @@ pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] #[doc = "## Safety"] @@ -17181,7 +16497,6 @@ pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { static_assert_uimm_bits!(N, 1); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] #[doc = "## Safety"] @@ -17197,7 +16512,6 @@ pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] #[doc = "## Safety"] @@ -17212,7 +16526,6 @@ pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { static_assert_uimm_bits!(N, 1); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] #[doc = "## Safety"] @@ -17228,7 +16541,6 @@ pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] #[doc = "## Safety"] @@ -17243,7 +16555,6 @@ pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { static_assert_uimm_bits!(N, 1); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] #[doc = "## Safety"] @@ -17259,7 +16570,6 @@ pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] #[doc = "## Safety"] @@ -17274,7 +16584,6 @@ pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { static_assert_uimm_bits!(N, 1); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] #[doc = "## Safety"] @@ -17290,7 +16599,6 @@ pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] #[doc = "## Safety"] @@ -17305,7 +16613,6 @@ pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { static_assert_uimm_bits!(N, 1); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] #[doc = "## Safety"] @@ -17321,7 +16628,6 @@ pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] #[doc = "## Safety"] @@ -17336,7 +16642,6 @@ pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { static_assert_uimm_bits!(N, 2); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] #[doc = "## Safety"] @@ -17352,7 +16657,6 @@ pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] #[doc = "## Safety"] @@ -17367,7 +16671,6 @@ pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { static_assert_uimm_bits!(N, 2); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] #[doc = "## Safety"] @@ -17383,7 +16686,6 @@ pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] #[doc = "## Safety"] @@ -17398,7 +16700,6 @@ pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { static_assert_uimm_bits!(N, 2); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] #[doc = "## Safety"] @@ -17414,7 +16715,6 @@ pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] #[doc = "## Safety"] @@ -17429,7 +16729,6 @@ pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { static_assert_uimm_bits!(N, 2); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] #[doc = "## Safety"] @@ -17445,7 +16744,6 @@ pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] #[doc = "## Safety"] @@ -17460,7 +16758,6 @@ pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { static_assert_uimm_bits!(N, 2); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] #[doc = "## Safety"] @@ -17476,7 +16773,6 @@ pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] #[doc = "## Safety"] @@ -17491,7 +16787,6 @@ pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { static_assert_uimm_bits!(N, 2); simd_extract!(a, N as u32) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] #[doc = "## Safety"] @@ -17507,7 +16802,6 @@ pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); simd_extract!(a, N as u32) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] #[doc = "## Safety"] @@ -17518,7 +16812,7 @@ pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v16i8" @@ -17527,7 +16821,6 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } _veor3q_s8(a, b, c) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] #[doc = "## Safety"] @@ -17538,7 +16831,7 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v16i8" @@ -17555,7 +16848,6 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] #[doc = "## Safety"] @@ -17566,7 +16858,7 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v8i16" @@ -17575,7 +16867,6 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t } _veor3q_s16(a, b, c) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] #[doc = "## Safety"] @@ -17586,7 +16877,7 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v8i16" @@ -17599,7 +16890,6 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t let ret_val: int16x8_t = _veor3q_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] #[doc = "## Safety"] @@ -17610,7 +16900,7 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v4i32" @@ -17619,7 +16909,6 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t } _veor3q_s32(a, b, c) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] #[doc = "## Safety"] @@ -17630,7 +16919,7 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v4i32" @@ -17643,7 +16932,6 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t let ret_val: int32x4_t = _veor3q_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] #[doc = "## Safety"] @@ -17654,7 +16942,7 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v2i64" @@ -17663,7 +16951,6 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t } _veor3q_s64(a, b, c) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] #[doc = "## Safety"] @@ -17674,7 +16961,7 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v2i64" @@ -17687,7 +16974,6 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t let ret_val: int64x2_t = _veor3q_s64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] #[doc = "## Safety"] @@ -17698,7 +16984,7 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v16i8" @@ -17707,7 +16993,6 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 } _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] #[doc = "## Safety"] @@ -17718,7 +17003,7 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v16i8" @@ -17735,7 +17020,6 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] #[doc = "## Safety"] @@ -17746,7 +17030,7 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v8i16" @@ -17755,7 +17039,6 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x } _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] #[doc = "## Safety"] @@ -17766,7 +17049,7 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v8i16" @@ -17780,7 +17063,6 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] #[doc = "## Safety"] @@ -17791,7 +17073,7 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v4i32" @@ -17800,7 +17082,6 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x } _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] #[doc = "## Safety"] @@ -17811,7 +17092,7 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v4i32" @@ -17825,7 +17106,6 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] #[doc = "## Safety"] @@ -17836,7 +17116,7 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v2i64" @@ -17845,7 +17125,6 @@ pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x } _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] #[doc = "## Safety"] @@ -17856,7 +17135,7 @@ pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v2i64" @@ -17870,7 +17149,6 @@ pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] #[doc = "## Safety"] @@ -17889,7 +17167,6 @@ pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64 _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] #[doc = "## Safety"] @@ -17911,7 +17188,6 @@ pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64 }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] #[doc = "## Safety"] @@ -17930,7 +17206,6 @@ pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_ _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] #[doc = "## Safety"] @@ -17952,7 +17227,6 @@ pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_ }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"] #[doc = "## Safety"] @@ -17962,7 +17236,7 @@ pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_ #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmadd))] pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.v1f64" @@ -17971,7 +17245,6 @@ pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 } _vfma_f64(b, c, a) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] #[doc = "## Safety"] @@ -17990,7 +17263,6 @@ pub unsafe fn vfma_lane_f32( static_assert_uimm_bits!(LANE, 1); vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] #[doc = "## Safety"] @@ -18013,7 +17285,6 @@ pub unsafe fn vfma_lane_f32( let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] #[doc = "## Safety"] @@ -18032,7 +17303,6 @@ pub unsafe fn vfma_laneq_f32( static_assert_uimm_bits!(LANE, 2); vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] #[doc = "## Safety"] @@ -18055,7 +17325,6 @@ pub unsafe fn vfma_laneq_f32( let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] #[doc = "## Safety"] @@ -18074,7 +17343,6 @@ pub unsafe fn vfmaq_lane_f32( static_assert_uimm_bits!(LANE, 1); vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] #[doc = "## Safety"] @@ -18097,7 +17365,6 @@ pub unsafe fn vfmaq_lane_f32( let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] #[doc = "## Safety"] @@ -18116,7 +17383,6 @@ pub unsafe fn vfmaq_laneq_f32( static_assert_uimm_bits!(LANE, 2); vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] #[doc = "## Safety"] @@ -18139,7 +17405,6 @@ pub unsafe fn vfmaq_laneq_f32( let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] #[doc = "## Safety"] @@ -18158,7 +17423,6 @@ pub unsafe fn vfmaq_laneq_f64( static_assert_uimm_bits!(LANE, 1); vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] #[doc = "## Safety"] @@ -18181,7 +17445,6 @@ pub unsafe fn vfmaq_laneq_f64( let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"] #[doc = "## Safety"] @@ -18199,7 +17462,6 @@ pub unsafe fn vfma_lane_f64( static_assert!(LANE == 0); vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] #[doc = "## Safety"] @@ -18218,7 +17480,6 @@ pub unsafe fn vfma_laneq_f64( static_assert_uimm_bits!(LANE, 1); vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] #[doc = "## Safety"] @@ -18238,7 +17499,6 @@ pub unsafe fn vfma_laneq_f64( let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"] #[doc = "## Safety"] @@ -18250,7 +17510,6 @@ pub unsafe fn vfma_laneq_f64( pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { vfma_f64(a, b, vdup_n_f64(c)) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"] #[doc = "## Safety"] @@ -18261,7 +17520,7 @@ pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f64" @@ -18272,7 +17531,6 @@ pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> let c: f64 = simd_extract!(c, LANE as u32); _vfmad_lane_f64(b, c, a) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] #[doc = "## Safety"] @@ -18283,7 +17541,7 @@ pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmla))] pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.v2f64" @@ -18292,7 +17550,6 @@ pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float } _vfmaq_f64(b, c, a) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] #[doc = "## Safety"] @@ -18303,7 +17560,7 @@ pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmla))] pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.v2f64" @@ -18316,7 +17573,6 @@ pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float let ret_val: float64x2_t = _vfmaq_f64(b, c, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] #[doc = "## Safety"] @@ -18335,7 +17591,6 @@ pub unsafe fn vfmaq_lane_f64( static_assert!(LANE == 0); vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] #[doc = "## Safety"] @@ -18357,7 +17612,6 @@ pub unsafe fn vfmaq_lane_f64( let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] #[doc = "## Safety"] @@ -18370,7 +17624,6 @@ pub unsafe fn vfmaq_lane_f64( pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { vfmaq_f64(a, b, vdupq_n_f64(c)) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] #[doc = "## Safety"] @@ -18386,7 +17639,6 @@ pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] #[doc = "## Safety"] @@ -18398,7 +17650,7 @@ pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32" @@ -18409,7 +17661,6 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> let c: f32 = simd_extract!(c, LANE as u32); _vfmas_lane_f32(b, c, a) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] #[doc = "## Safety"] @@ -18421,7 +17672,7 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32" @@ -18433,7 +17684,6 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> let c: f32 = simd_extract!(c, LANE as u32); _vfmas_lane_f32(b, c, a) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] #[doc = "## Safety"] @@ -18445,7 +17695,7 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32" @@ -18456,7 +17706,6 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) - let c: f32 = simd_extract!(c, LANE as u32); _vfmas_laneq_f32(b, c, a) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] #[doc = "## Safety"] @@ -18468,7 +17717,7 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32" @@ -18480,7 +17729,6 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) - let c: f32 = simd_extract!(c, LANE as u32); _vfmas_laneq_f32(b, c, a) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] #[doc = "## Safety"] @@ -18492,7 +17740,7 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f64" @@ -18503,7 +17751,6 @@ pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) - let c: f64 = simd_extract!(c, LANE as u32); _vfmad_laneq_f64(b, c, a) } - #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] #[doc = "## Safety"] @@ -18515,7 +17762,7 @@ pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f64" @@ -18527,7 +17774,6 @@ pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) - let c: f64 = simd_extract!(c, LANE as u32); _vfmad_laneq_f64(b, c, a) } - #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"] #[doc = "## Safety"] @@ -18540,7 +17786,6 @@ pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 let b: float64x1_t = simd_neg(b); vfma_f64(a, b, c) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] #[doc = "## Safety"] @@ -18559,7 +17804,6 @@ pub unsafe fn vfms_lane_f32( static_assert_uimm_bits!(LANE, 1); vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] #[doc = "## Safety"] @@ -18582,7 +17826,6 @@ pub unsafe fn vfms_lane_f32( let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] #[doc = "## Safety"] @@ -18601,7 +17844,6 @@ pub unsafe fn vfms_laneq_f32( static_assert_uimm_bits!(LANE, 2); vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] #[doc = "## Safety"] @@ -18624,7 +17866,6 @@ pub unsafe fn vfms_laneq_f32( let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] #[doc = "## Safety"] @@ -18643,7 +17884,6 @@ pub unsafe fn vfmsq_lane_f32( static_assert_uimm_bits!(LANE, 1); vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] #[doc = "## Safety"] @@ -18666,7 +17906,6 @@ pub unsafe fn vfmsq_lane_f32( let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] #[doc = "## Safety"] @@ -18685,7 +17924,6 @@ pub unsafe fn vfmsq_laneq_f32( static_assert_uimm_bits!(LANE, 2); vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] #[doc = "## Safety"] @@ -18708,7 +17946,6 @@ pub unsafe fn vfmsq_laneq_f32( let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] #[doc = "## Safety"] @@ -18727,7 +17964,6 @@ pub unsafe fn vfmsq_laneq_f64( static_assert_uimm_bits!(LANE, 1); vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] #[doc = "## Safety"] @@ -18750,7 +17986,6 @@ pub unsafe fn vfmsq_laneq_f64( let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"] #[doc = "## Safety"] @@ -18768,7 +18003,6 @@ pub unsafe fn vfms_lane_f64( static_assert!(LANE == 0); vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] #[doc = "## Safety"] @@ -18787,7 +18021,6 @@ pub unsafe fn vfms_laneq_f64( static_assert_uimm_bits!(LANE, 1); vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] #[doc = "## Safety"] @@ -18807,7 +18040,6 @@ pub unsafe fn vfms_laneq_f64( let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"] #[doc = "## Safety"] @@ -18819,7 +18051,6 @@ pub unsafe fn vfms_laneq_f64( pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { vfms_f64(a, b, vdup_n_f64(c)) } - #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] #[doc = "## Safety"] @@ -18833,7 +18064,6 @@ pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float let b: float64x2_t = simd_neg(b); vfmaq_f64(a, b, c) } - #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] #[doc = "## Safety"] @@ -18851,7 +18081,6 @@ pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float let ret_val: float64x2_t = vfmaq_f64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] #[doc = "## Safety"] @@ -18870,7 +18099,6 @@ pub unsafe fn vfmsq_lane_f64( static_assert!(LANE == 0); vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] #[doc = "## Safety"] @@ -18892,7 +18120,6 @@ pub unsafe fn vfmsq_lane_f64( let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] #[doc = "## Safety"] @@ -18905,7 +18132,6 @@ pub unsafe fn vfmsq_lane_f64( pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { vfmsq_f64(a, b, vdupq_n_f64(c)) } - #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] #[doc = "## Safety"] @@ -18921,7 +18147,6 @@ pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] #[doc = "## Safety"] @@ -18935,7 +18160,6 @@ pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { vfmas_lane_f32::(a, -b, c) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] #[doc = "## Safety"] @@ -18950,7 +18174,6 @@ pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); vfmas_lane_f32::(a, -b, c) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] #[doc = "## Safety"] @@ -18964,7 +18187,6 @@ pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { vfmas_laneq_f32::(a, -b, c) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] #[doc = "## Safety"] @@ -18979,7 +18201,6 @@ pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vfmas_laneq_f32::(a, -b, c) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"] #[doc = "## Safety"] @@ -18992,7 +18213,6 @@ pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) - pub unsafe fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { vfmad_lane_f64::(a, -b, c) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] #[doc = "## Safety"] @@ -19006,7 +18226,6 @@ pub unsafe fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { vfmad_laneq_f64::(a, -b, c) } - #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] #[doc = "## Safety"] @@ -19021,7 +18240,6 @@ pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); vfmad_laneq_f64::(a, -b, c) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] @@ -19034,7 +18252,6 @@ pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) - pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] @@ -19048,7 +18265,6 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { let ret_val: float32x2_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] @@ -19061,7 +18277,6 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] @@ -19075,7 +18290,6 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { let ret_val: float32x4_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"] #[doc = "## Safety"] @@ -19087,7 +18301,6 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] #[doc = "## Safety"] @@ -19100,7 +18313,6 @@ pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] #[doc = "## Safety"] @@ -19114,7 +18326,6 @@ pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { let ret_val: float64x2_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] @@ -19127,7 +18338,6 @@ pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] @@ -19141,7 +18351,6 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { let ret_val: int8x8_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] @@ -19154,7 +18363,6 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] @@ -19172,7 +18380,6 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] @@ -19185,7 +18392,6 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] @@ -19199,7 +18405,6 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { let ret_val: int16x4_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] @@ -19212,7 +18417,6 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] @@ -19226,7 +18430,6 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { let ret_val: int16x8_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] @@ -19239,7 +18442,6 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] @@ -19253,7 +18455,6 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { let ret_val: int32x2_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] @@ -19266,7 +18467,6 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] @@ -19280,7 +18480,6 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { let ret_val: int32x4_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] #[doc = "## Safety"] @@ -19292,7 +18491,6 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] @@ -19305,7 +18503,6 @@ pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] @@ -19319,7 +18516,6 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { let ret_val: int64x2_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] @@ -19332,7 +18528,6 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] @@ -19346,7 +18541,6 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { let ret_val: uint8x8_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] @@ -19359,7 +18553,6 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] @@ -19377,7 +18570,6 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] @@ -19390,7 +18582,6 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] @@ -19404,7 +18595,6 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { let ret_val: uint16x4_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] @@ -19417,7 +18607,6 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] @@ -19431,7 +18620,6 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { let ret_val: uint16x8_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] @@ -19444,7 +18632,6 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] @@ -19458,7 +18645,6 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { let ret_val: uint32x2_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] @@ -19471,7 +18657,6 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] @@ -19485,7 +18670,6 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { let ret_val: uint32x4_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] #[doc = "## Safety"] @@ -19497,7 +18681,6 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] @@ -19510,7 +18693,6 @@ pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] @@ -19524,7 +18706,6 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { let ret_val: uint64x2_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] @@ -19537,7 +18718,6 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] @@ -19551,7 +18731,6 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { let ret_val: poly8x8_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] #[doc = "## Safety"] @@ -19564,7 +18743,6 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] #[doc = "## Safety"] @@ -19582,7 +18760,6 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] @@ -19595,7 +18772,6 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] @@ -19609,7 +18785,6 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { let ret_val: poly16x4_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] @@ -19622,7 +18797,6 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] @@ -19636,7 +18810,6 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { let ret_val: poly16x8_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] #[doc = "## Safety"] @@ -19648,7 +18821,6 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] #[doc = "## Safety"] @@ -19661,7 +18833,6 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { crate::ptr::read_unaligned(ptr.cast()) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] #[doc = "## Safety"] @@ -19675,7 +18846,6 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { let ret_val: poly64x2_t = crate::ptr::read_unaligned(ptr.cast()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"] #[doc = "## Safety"] @@ -19685,7 +18855,7 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64" @@ -19694,7 +18864,6 @@ pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { } _vld1_f64_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"] #[doc = "## Safety"] @@ -19704,7 +18873,7 @@ pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64" @@ -19713,7 +18882,6 @@ pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { } _vld1_f64_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"] #[doc = "## Safety"] @@ -19723,7 +18891,7 @@ pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64" @@ -19732,7 +18900,6 @@ pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { } _vld1_f64_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] #[doc = "## Safety"] @@ -19743,7 +18910,7 @@ pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" @@ -19752,7 +18919,6 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { } _vld1q_f64_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] #[doc = "## Safety"] @@ -19763,7 +18929,7 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" @@ -19775,7 +18941,6 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] #[doc = "## Safety"] @@ -19786,7 +18951,7 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" @@ -19795,7 +18960,6 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { } _vld1q_f64_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] #[doc = "## Safety"] @@ -19806,7 +18970,7 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" @@ -19819,7 +18983,6 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] #[doc = "## Safety"] @@ -19830,7 +18993,7 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" @@ -19839,7 +19002,6 @@ pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { } _vld1q_f64_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] #[doc = "## Safety"] @@ -19850,7 +19012,7 @@ pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld1))] pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" @@ -19864,7 +19026,6 @@ pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"] #[doc = "## Safety"] @@ -19874,7 +19035,7 @@ pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64" @@ -19883,7 +19044,6 @@ pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { } _vld2_dup_f64(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] #[doc = "## Safety"] @@ -19894,7 +19054,7 @@ pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" @@ -19903,7 +19063,6 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { } _vld2q_dup_f64(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] #[doc = "## Safety"] @@ -19914,7 +19073,7 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" @@ -19926,7 +19085,6 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] #[doc = "## Safety"] @@ -19937,7 +19095,7 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" @@ -19946,7 +19104,6 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { } _vld2q_dup_s64(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] #[doc = "## Safety"] @@ -19957,7 +19114,7 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" @@ -19969,7 +19126,6 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"] #[doc = "## Safety"] @@ -19979,7 +19135,7 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64" @@ -19988,7 +19144,6 @@ pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { } _vld2_f64(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"] #[doc = "## Safety"] @@ -20000,7 +19155,7 @@ pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8" @@ -20009,7 +19164,6 @@ pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> } _vld2_lane_f64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"] #[doc = "## Safety"] @@ -20021,7 +19175,7 @@ pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8" @@ -20030,7 +19184,6 @@ pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> i } _vld2_lane_s64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"] #[doc = "## Safety"] @@ -20044,7 +19197,6 @@ pub unsafe fn vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> static_assert!(LANE == 0); transmute(vld2_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"] #[doc = "## Safety"] @@ -20058,7 +19210,6 @@ pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> static_assert!(LANE == 0); transmute(vld2_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] #[doc = "## Safety"] @@ -20071,7 +19222,6 @@ pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { transmute(vld2q_dup_s64(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] #[doc = "## Safety"] @@ -20087,7 +19237,6 @@ pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] #[doc = "## Safety"] @@ -20100,7 +19249,6 @@ pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { transmute(vld2q_dup_s64(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] #[doc = "## Safety"] @@ -20116,7 +19264,6 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] #[doc = "## Safety"] @@ -20127,7 +19274,7 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" @@ -20136,7 +19283,6 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { } _vld2q_f64(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] #[doc = "## Safety"] @@ -20147,7 +19293,7 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" @@ -20159,7 +19305,6 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] #[doc = "## Safety"] @@ -20170,7 +19315,7 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" @@ -20179,7 +19324,6 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { } _vld2q_s64(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] #[doc = "## Safety"] @@ -20190,7 +19334,7 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" @@ -20202,7 +19346,6 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] #[doc = "## Safety"] @@ -20215,7 +19358,7 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" @@ -20225,7 +19368,6 @@ pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) - } _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] #[doc = "## Safety"] @@ -20238,7 +19380,7 @@ pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" @@ -20254,7 +19396,6 @@ pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] #[doc = "## Safety"] @@ -20267,7 +19408,7 @@ pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" @@ -20276,7 +19417,6 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in } _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] #[doc = "## Safety"] @@ -20289,7 +19429,7 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" @@ -20320,7 +19460,6 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in ); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] #[doc = "## Safety"] @@ -20333,7 +19472,7 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" @@ -20342,7 +19481,6 @@ pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> } _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] #[doc = "## Safety"] @@ -20355,7 +19493,7 @@ pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" @@ -20370,7 +19508,6 @@ pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] #[doc = "## Safety"] @@ -20385,7 +19522,6 @@ pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld2q_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] #[doc = "## Safety"] @@ -20406,7 +19542,6 @@ pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] #[doc = "## Safety"] @@ -20421,7 +19556,6 @@ pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> u static_assert_uimm_bits!(LANE, 4); transmute(vld2q_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] #[doc = "## Safety"] @@ -20458,7 +19592,6 @@ pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> u ); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] #[doc = "## Safety"] @@ -20473,7 +19606,6 @@ pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld2q_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] #[doc = "## Safety"] @@ -20494,7 +19626,6 @@ pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] #[doc = "## Safety"] @@ -20509,7 +19640,6 @@ pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> p static_assert_uimm_bits!(LANE, 4); transmute(vld2q_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] #[doc = "## Safety"] @@ -20546,7 +19676,6 @@ pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> p ); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] #[doc = "## Safety"] @@ -20559,7 +19688,6 @@ pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> p pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { transmute(vld2q_s64(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] #[doc = "## Safety"] @@ -20575,7 +19703,6 @@ pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] #[doc = "## Safety"] @@ -20588,7 +19715,6 @@ pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { transmute(vld2q_s64(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] #[doc = "## Safety"] @@ -20604,7 +19730,6 @@ pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"] #[doc = "## Safety"] @@ -20614,7 +19739,7 @@ pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64" @@ -20623,7 +19748,6 @@ pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { } _vld3_dup_f64(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] #[doc = "## Safety"] @@ -20634,7 +19758,7 @@ pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" @@ -20643,7 +19767,6 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { } _vld3q_dup_f64(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] #[doc = "## Safety"] @@ -20654,7 +19777,7 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" @@ -20667,7 +19790,6 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] #[doc = "## Safety"] @@ -20678,7 +19800,7 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" @@ -20687,7 +19809,6 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { } _vld3q_dup_s64(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] #[doc = "## Safety"] @@ -20698,7 +19819,7 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" @@ -20711,7 +19832,6 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"] #[doc = "## Safety"] @@ -20721,7 +19841,7 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64" @@ -20730,7 +19850,6 @@ pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { } _vld3_f64(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"] #[doc = "## Safety"] @@ -20742,7 +19861,7 @@ pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8" @@ -20757,7 +19876,6 @@ pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> } _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"] #[doc = "## Safety"] @@ -20771,7 +19889,6 @@ pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> static_assert!(LANE == 0); transmute(vld3_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"] #[doc = "## Safety"] @@ -20783,7 +19900,7 @@ pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8" @@ -20798,7 +19915,6 @@ pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> i } _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"] #[doc = "## Safety"] @@ -20812,7 +19928,6 @@ pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> static_assert!(LANE == 0); transmute(vld3_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] #[doc = "## Safety"] @@ -20825,7 +19940,6 @@ pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { transmute(vld3q_dup_s64(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] #[doc = "## Safety"] @@ -20842,7 +19956,6 @@ pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] #[doc = "## Safety"] @@ -20855,7 +19968,6 @@ pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { transmute(vld3q_dup_s64(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] #[doc = "## Safety"] @@ -20872,7 +19984,6 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] #[doc = "## Safety"] @@ -20883,7 +19994,7 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" @@ -20892,7 +20003,6 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { } _vld3q_f64(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] #[doc = "## Safety"] @@ -20903,7 +20013,7 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" @@ -20916,7 +20026,6 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] #[doc = "## Safety"] @@ -20927,7 +20036,7 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" @@ -20936,7 +20045,6 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { } _vld3q_s64(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] #[doc = "## Safety"] @@ -20947,7 +20055,7 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" @@ -20960,7 +20068,6 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] #[doc = "## Safety"] @@ -20973,7 +20080,7 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" @@ -20988,7 +20095,6 @@ pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) - } _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] #[doc = "## Safety"] @@ -21001,7 +20107,7 @@ pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" @@ -21024,7 +20130,6 @@ pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] #[doc = "## Safety"] @@ -21039,7 +20144,6 @@ pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld3q_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] #[doc = "## Safety"] @@ -21062,7 +20166,6 @@ pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] #[doc = "## Safety"] @@ -21075,7 +20178,7 @@ pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" @@ -21090,7 +20193,6 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in } _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] #[doc = "## Safety"] @@ -21103,7 +20205,7 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" @@ -21150,7 +20252,6 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in ); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] #[doc = "## Safety"] @@ -21163,7 +20264,7 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" @@ -21178,7 +20279,6 @@ pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> } _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] #[doc = "## Safety"] @@ -21191,7 +20291,7 @@ pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" @@ -21214,7 +20314,6 @@ pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] #[doc = "## Safety"] @@ -21229,7 +20328,6 @@ pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> u static_assert_uimm_bits!(LANE, 4); transmute(vld3q_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] #[doc = "## Safety"] @@ -21276,7 +20374,6 @@ pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> u ); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] #[doc = "## Safety"] @@ -21291,7 +20388,6 @@ pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld3q_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] #[doc = "## Safety"] @@ -21314,7 +20410,6 @@ pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] #[doc = "## Safety"] @@ -21329,7 +20424,6 @@ pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> p static_assert_uimm_bits!(LANE, 4); transmute(vld3q_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] #[doc = "## Safety"] @@ -21376,7 +20470,6 @@ pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> p ); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] #[doc = "## Safety"] @@ -21389,7 +20482,6 @@ pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> p pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { transmute(vld3q_s64(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] #[doc = "## Safety"] @@ -21406,7 +20498,6 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] #[doc = "## Safety"] @@ -21419,7 +20510,6 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { transmute(vld3q_s64(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] #[doc = "## Safety"] @@ -21436,7 +20526,6 @@ pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"] #[doc = "## Safety"] @@ -21446,7 +20535,7 @@ pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64" @@ -21455,7 +20544,6 @@ pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { } _vld4_dup_f64(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] #[doc = "## Safety"] @@ -21466,7 +20554,7 @@ pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" @@ -21475,7 +20563,6 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { } _vld4q_dup_f64(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] #[doc = "## Safety"] @@ -21486,7 +20573,7 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" @@ -21500,7 +20587,6 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] #[doc = "## Safety"] @@ -21511,7 +20597,7 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" @@ -21520,7 +20606,6 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { } _vld4q_dup_s64(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] #[doc = "## Safety"] @@ -21531,7 +20616,7 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" @@ -21545,7 +20630,6 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"] #[doc = "## Safety"] @@ -21555,7 +20639,7 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64" @@ -21564,7 +20648,6 @@ pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { } _vld4_f64(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"] #[doc = "## Safety"] @@ -21576,7 +20659,7 @@ pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8" @@ -21592,7 +20675,6 @@ pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> } _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"] #[doc = "## Safety"] @@ -21604,7 +20686,7 @@ pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8" @@ -21620,7 +20702,6 @@ pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> i } _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"] #[doc = "## Safety"] @@ -21634,7 +20715,6 @@ pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> static_assert!(LANE == 0); transmute(vld4_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"] #[doc = "## Safety"] @@ -21648,7 +20728,6 @@ pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> static_assert!(LANE == 0); transmute(vld4_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] #[doc = "## Safety"] @@ -21661,7 +20740,6 @@ pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { transmute(vld4q_dup_s64(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] #[doc = "## Safety"] @@ -21679,7 +20757,6 @@ pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] #[doc = "## Safety"] @@ -21692,7 +20769,6 @@ pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { transmute(vld4q_dup_s64(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] #[doc = "## Safety"] @@ -21710,7 +20786,6 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] #[doc = "## Safety"] @@ -21721,7 +20796,7 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" @@ -21730,7 +20805,6 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { } _vld4q_f64(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] #[doc = "## Safety"] @@ -21741,7 +20815,7 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" @@ -21755,7 +20829,6 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] #[doc = "## Safety"] @@ -21766,7 +20839,7 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" @@ -21775,7 +20848,6 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { } _vld4q_s64(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] #[doc = "## Safety"] @@ -21786,7 +20858,7 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" @@ -21800,7 +20872,6 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] #[doc = "## Safety"] @@ -21813,7 +20884,7 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" @@ -21829,7 +20900,6 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - } _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] #[doc = "## Safety"] @@ -21842,7 +20912,7 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" @@ -21868,7 +20938,6 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] #[doc = "## Safety"] @@ -21881,7 +20950,7 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" @@ -21897,7 +20966,6 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in } _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] #[doc = "## Safety"] @@ -21910,7 +20978,7 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" @@ -21968,7 +21036,6 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in ); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] #[doc = "## Safety"] @@ -21981,7 +21048,7 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" @@ -21997,7 +21064,6 @@ pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> } _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] #[doc = "## Safety"] @@ -22010,7 +21076,7 @@ pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" @@ -22036,7 +21102,6 @@ pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] #[doc = "## Safety"] @@ -22051,7 +21116,6 @@ pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld4q_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] #[doc = "## Safety"] @@ -22076,7 +21140,6 @@ pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] #[doc = "## Safety"] @@ -22091,7 +21154,6 @@ pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> u static_assert_uimm_bits!(LANE, 4); transmute(vld4q_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] #[doc = "## Safety"] @@ -22148,7 +21210,6 @@ pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> u ); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] #[doc = "## Safety"] @@ -22163,7 +21224,6 @@ pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld4q_lane_s64::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] #[doc = "## Safety"] @@ -22188,7 +21248,6 @@ pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] #[doc = "## Safety"] @@ -22203,7 +21262,6 @@ pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> p static_assert_uimm_bits!(LANE, 4); transmute(vld4q_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] #[doc = "## Safety"] @@ -22260,7 +21318,6 @@ pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> p ); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] #[doc = "## Safety"] @@ -22273,7 +21330,6 @@ pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> p pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { transmute(vld4q_s64(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] #[doc = "## Safety"] @@ -22291,7 +21347,6 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] #[doc = "## Safety"] @@ -22304,7 +21359,6 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { transmute(vld4q_s64(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] #[doc = "## Safety"] @@ -22322,7 +21376,6 @@ pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"] #[doc = "## Safety"] @@ -22332,7 +21385,7 @@ pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmax))] pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmax.v1f64" @@ -22341,7 +21394,6 @@ pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vmax_f64(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] #[doc = "## Safety"] @@ -22352,7 +21404,7 @@ pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmax))] pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmax.v2f64" @@ -22361,7 +21413,6 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vmaxq_f64(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] #[doc = "## Safety"] @@ -22372,7 +21423,7 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmax))] pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmax.v2f64" @@ -22384,7 +21435,6 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vmaxq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"] #[doc = "## Safety"] @@ -22394,7 +21444,7 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnm.v1f64" @@ -22403,7 +21453,6 @@ pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vmaxnm_f64(a, b) } - #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] #[doc = "## Safety"] @@ -22414,7 +21463,7 @@ pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnm.v2f64" @@ -22423,7 +21472,6 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vmaxnmq_f64(a, b) } - #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] #[doc = "## Safety"] @@ -22434,7 +21482,7 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnm.v2f64" @@ -22446,7 +21494,6 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vmaxnmq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] #[doc = "## Safety"] @@ -22457,7 +21504,7 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" @@ -22466,7 +21513,6 @@ pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { } _vmaxnmv_f32(a) } - #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] #[doc = "## Safety"] @@ -22477,7 +21523,7 @@ pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" @@ -22487,7 +21533,6 @@ pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vmaxnmv_f32(a) } - #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] #[doc = "## Safety"] @@ -22498,7 +21543,7 @@ pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" @@ -22507,7 +21552,6 @@ pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { } _vmaxnmvq_f64(a) } - #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] #[doc = "## Safety"] @@ -22518,7 +21562,7 @@ pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" @@ -22528,7 +21572,6 @@ pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vmaxnmvq_f64(a) } - #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] #[doc = "## Safety"] @@ -22539,7 +21582,7 @@ pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmv))] pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" @@ -22548,7 +21591,6 @@ pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { } _vmaxnmvq_f32(a) } - #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] #[doc = "## Safety"] @@ -22559,7 +21601,7 @@ pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmv))] pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" @@ -22569,7 +21611,6 @@ pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vmaxnmvq_f32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] #[doc = "## Safety"] @@ -22580,7 +21621,7 @@ pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" @@ -22589,7 +21630,6 @@ pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { } _vmaxv_f32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] #[doc = "## Safety"] @@ -22600,7 +21640,7 @@ pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" @@ -22610,7 +21650,6 @@ pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vmaxv_f32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] #[doc = "## Safety"] @@ -22621,7 +21660,7 @@ pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxv))] pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32" @@ -22630,7 +21669,6 @@ pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { } _vmaxvq_f32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] #[doc = "## Safety"] @@ -22641,7 +21679,7 @@ pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxv))] pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32" @@ -22651,7 +21689,6 @@ pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vmaxvq_f32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] #[doc = "## Safety"] @@ -22662,7 +21699,7 @@ pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" @@ -22671,7 +21708,6 @@ pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { } _vmaxvq_f64(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] #[doc = "## Safety"] @@ -22682,7 +21718,7 @@ pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" @@ -22692,7 +21728,6 @@ pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vmaxvq_f64(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] #[doc = "## Safety"] @@ -22703,7 +21738,7 @@ pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" @@ -22712,7 +21747,6 @@ pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { } _vmaxv_s8(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] #[doc = "## Safety"] @@ -22723,7 +21757,7 @@ pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" @@ -22733,7 +21767,6 @@ pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vmaxv_s8(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] #[doc = "## Safety"] @@ -22744,7 +21777,7 @@ pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" @@ -22753,7 +21786,6 @@ pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { } _vmaxvq_s8(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] #[doc = "## Safety"] @@ -22764,7 +21796,7 @@ pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" @@ -22774,7 +21806,6 @@ pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vmaxvq_s8(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] #[doc = "## Safety"] @@ -22785,7 +21816,7 @@ pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" @@ -22794,7 +21825,6 @@ pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { } _vmaxv_s16(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] #[doc = "## Safety"] @@ -22805,7 +21835,7 @@ pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" @@ -22815,7 +21845,6 @@ pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vmaxv_s16(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] #[doc = "## Safety"] @@ -22826,7 +21855,7 @@ pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" @@ -22835,7 +21864,6 @@ pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { } _vmaxvq_s16(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] #[doc = "## Safety"] @@ -22846,7 +21874,7 @@ pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" @@ -22856,7 +21884,6 @@ pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vmaxvq_s16(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] #[doc = "## Safety"] @@ -22867,7 +21894,7 @@ pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" @@ -22876,7 +21903,6 @@ pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { } _vmaxv_s32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] #[doc = "## Safety"] @@ -22887,7 +21913,7 @@ pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" @@ -22897,7 +21923,6 @@ pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); _vmaxv_s32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] #[doc = "## Safety"] @@ -22908,7 +21933,7 @@ pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" @@ -22917,7 +21942,6 @@ pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { } _vmaxvq_s32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] #[doc = "## Safety"] @@ -22928,7 +21952,7 @@ pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" @@ -22938,7 +21962,6 @@ pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vmaxvq_s32(a) } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] #[doc = "## Safety"] @@ -22949,7 +21972,7 @@ pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" @@ -22958,7 +21981,6 @@ pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { } _vmaxv_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] #[doc = "## Safety"] @@ -22969,7 +21991,7 @@ pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" @@ -22979,7 +22001,6 @@ pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vmaxv_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] #[doc = "## Safety"] @@ -22990,7 +22011,7 @@ pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" @@ -22999,7 +22020,6 @@ pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { } _vmaxvq_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] #[doc = "## Safety"] @@ -23010,7 +22030,7 @@ pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" @@ -23020,7 +22040,6 @@ pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vmaxvq_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] #[doc = "## Safety"] @@ -23031,7 +22050,7 @@ pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" @@ -23040,7 +22059,6 @@ pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { } _vmaxv_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] #[doc = "## Safety"] @@ -23051,7 +22069,7 @@ pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" @@ -23061,7 +22079,6 @@ pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vmaxv_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] #[doc = "## Safety"] @@ -23072,7 +22089,7 @@ pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" @@ -23081,7 +22098,6 @@ pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { } _vmaxvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] #[doc = "## Safety"] @@ -23092,7 +22108,7 @@ pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" @@ -23102,7 +22118,6 @@ pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vmaxvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] #[doc = "## Safety"] @@ -23113,7 +22128,7 @@ pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" @@ -23122,7 +22137,6 @@ pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { } _vmaxv_u32(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] #[doc = "## Safety"] @@ -23133,7 +22147,7 @@ pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" @@ -23143,7 +22157,6 @@ pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); _vmaxv_u32(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] #[doc = "## Safety"] @@ -23154,7 +22167,7 @@ pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" @@ -23163,7 +22176,6 @@ pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { } _vmaxvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] #[doc = "## Safety"] @@ -23174,7 +22186,7 @@ pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" @@ -23184,7 +22196,6 @@ pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vmaxvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] #[doc = "## Safety"] @@ -23194,7 +22205,7 @@ pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmin))] pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmin.v1f64" @@ -23203,7 +22214,6 @@ pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vmin_f64(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] #[doc = "## Safety"] @@ -23214,7 +22224,7 @@ pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmin))] pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmin.v2f64" @@ -23223,7 +22233,6 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vminq_f64(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] #[doc = "## Safety"] @@ -23234,7 +22243,7 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmin))] pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmin.v2f64" @@ -23246,7 +22255,6 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vminq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"] #[doc = "## Safety"] @@ -23256,7 +22264,7 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnm.v1f64" @@ -23265,7 +22273,6 @@ pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vminnm_f64(a, b) } - #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] #[doc = "## Safety"] @@ -23276,7 +22283,7 @@ pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnm.v2f64" @@ -23285,7 +22292,6 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vminnmq_f64(a, b) } - #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] #[doc = "## Safety"] @@ -23296,7 +22302,7 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnm.v2f64" @@ -23308,7 +22314,6 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vminnmq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] #[doc = "## Safety"] @@ -23319,7 +22324,7 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" @@ -23328,7 +22333,6 @@ pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { } _vminnmv_f32(a) } - #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] #[doc = "## Safety"] @@ -23339,7 +22343,7 @@ pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" @@ -23349,7 +22353,6 @@ pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vminnmv_f32(a) } - #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] #[doc = "## Safety"] @@ -23360,7 +22363,7 @@ pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" @@ -23369,7 +22372,6 @@ pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { } _vminnmvq_f64(a) } - #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] #[doc = "## Safety"] @@ -23380,7 +22382,7 @@ pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" @@ -23390,7 +22392,6 @@ pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vminnmvq_f64(a) } - #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] #[doc = "## Safety"] @@ -23401,7 +22402,7 @@ pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fminnmv))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" @@ -23410,7 +22411,6 @@ pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { } _vminnmvq_f32(a) } - #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] #[doc = "## Safety"] @@ -23421,7 +22421,7 @@ pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmv))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" @@ -23431,7 +22431,6 @@ pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vminnmvq_f32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] #[doc = "## Safety"] @@ -23442,7 +22441,7 @@ pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v2f32" @@ -23451,7 +22450,6 @@ pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { } _vminv_f32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] #[doc = "## Safety"] @@ -23462,7 +22460,7 @@ pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v2f32" @@ -23472,7 +22470,6 @@ pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vminv_f32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] #[doc = "## Safety"] @@ -23483,7 +22480,7 @@ pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminv))] pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v4f32" @@ -23492,7 +22489,6 @@ pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { } _vminvq_f32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] #[doc = "## Safety"] @@ -23503,7 +22499,7 @@ pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminv))] pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v4f32" @@ -23513,7 +22509,6 @@ pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vminvq_f32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] #[doc = "## Safety"] @@ -23524,7 +22519,7 @@ pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f64.v2f64" @@ -23533,7 +22528,6 @@ pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { } _vminvq_f64(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] #[doc = "## Safety"] @@ -23544,7 +22538,7 @@ pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f64.v2f64" @@ -23554,7 +22548,6 @@ pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vminvq_f64(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] #[doc = "## Safety"] @@ -23565,7 +22558,7 @@ pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i8.v8i8" @@ -23574,7 +22567,6 @@ pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { } _vminv_s8(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] #[doc = "## Safety"] @@ -23585,7 +22577,7 @@ pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i8.v8i8" @@ -23595,7 +22587,6 @@ pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vminv_s8(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] #[doc = "## Safety"] @@ -23606,7 +22597,7 @@ pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i8.v16i8" @@ -23615,7 +22606,6 @@ pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { } _vminvq_s8(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] #[doc = "## Safety"] @@ -23626,7 +22616,7 @@ pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i8.v16i8" @@ -23636,7 +22626,6 @@ pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vminvq_s8(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] #[doc = "## Safety"] @@ -23647,7 +22636,7 @@ pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i16.v4i16" @@ -23656,7 +22645,6 @@ pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { } _vminv_s16(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] #[doc = "## Safety"] @@ -23667,7 +22655,7 @@ pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i16.v4i16" @@ -23677,7 +22665,6 @@ pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vminv_s16(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] #[doc = "## Safety"] @@ -23688,7 +22675,7 @@ pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i16.v8i16" @@ -23697,7 +22684,6 @@ pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { } _vminvq_s16(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] #[doc = "## Safety"] @@ -23708,7 +22694,7 @@ pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i16.v8i16" @@ -23718,7 +22704,6 @@ pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vminvq_s16(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] #[doc = "## Safety"] @@ -23729,7 +22714,7 @@ pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i32.v2i32" @@ -23738,7 +22723,6 @@ pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { } _vminv_s32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] #[doc = "## Safety"] @@ -23749,7 +22733,7 @@ pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i32.v2i32" @@ -23759,7 +22743,6 @@ pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); _vminv_s32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] #[doc = "## Safety"] @@ -23770,7 +22753,7 @@ pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i32.v4i32" @@ -23779,7 +22762,6 @@ pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { } _vminvq_s32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] #[doc = "## Safety"] @@ -23790,7 +22772,7 @@ pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminv.i32.v4i32" @@ -23800,7 +22782,6 @@ pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vminvq_s32(a) } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] #[doc = "## Safety"] @@ -23811,7 +22792,7 @@ pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i8.v8i8" @@ -23820,7 +22801,6 @@ pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { } _vminv_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] #[doc = "## Safety"] @@ -23831,7 +22811,7 @@ pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i8.v8i8" @@ -23841,7 +22821,6 @@ pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vminv_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] #[doc = "## Safety"] @@ -23852,7 +22831,7 @@ pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i8.v16i8" @@ -23861,7 +22840,6 @@ pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { } _vminvq_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] #[doc = "## Safety"] @@ -23872,7 +22850,7 @@ pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i8.v16i8" @@ -23882,7 +22860,6 @@ pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vminvq_u8(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] #[doc = "## Safety"] @@ -23893,7 +22870,7 @@ pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i16.v4i16" @@ -23902,7 +22879,6 @@ pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { } _vminv_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] #[doc = "## Safety"] @@ -23913,7 +22889,7 @@ pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i16.v4i16" @@ -23923,7 +22899,6 @@ pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vminv_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] #[doc = "## Safety"] @@ -23934,7 +22909,7 @@ pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i16.v8i16" @@ -23943,7 +22918,6 @@ pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { } _vminvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] #[doc = "## Safety"] @@ -23954,7 +22928,7 @@ pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i16.v8i16" @@ -23964,7 +22938,6 @@ pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vminvq_u16(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] #[doc = "## Safety"] @@ -23975,7 +22948,7 @@ pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i32.v2i32" @@ -23984,7 +22957,6 @@ pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { } _vminv_u32(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] #[doc = "## Safety"] @@ -23995,7 +22967,7 @@ pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i32.v2i32" @@ -24005,7 +22977,6 @@ pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); _vminv_u32(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] #[doc = "## Safety"] @@ -24016,7 +22987,7 @@ pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i32.v4i32" @@ -24025,7 +22996,6 @@ pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { } _vminvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] #[doc = "## Safety"] @@ -24036,7 +23006,7 @@ pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i32.v4i32" @@ -24046,7 +23016,6 @@ pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vminvq_u32(a.as_signed()).as_unsigned() } - #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] #[doc = "## Safety"] @@ -24058,7 +23027,6 @@ pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] #[doc = "## Safety"] @@ -24071,7 +23039,6 @@ pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] #[doc = "## Safety"] @@ -24088,7 +23055,6 @@ pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float let ret_val: float64x2_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] #[doc = "## Safety"] @@ -24124,7 +23090,6 @@ pub unsafe fn vmlal_high_lane_s16( ), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] #[doc = "## Safety"] @@ -24164,7 +23129,6 @@ pub unsafe fn vmlal_high_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] #[doc = "## Safety"] @@ -24200,7 +23164,6 @@ pub unsafe fn vmlal_high_laneq_s16( ), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] #[doc = "## Safety"] @@ -24240,7 +23203,6 @@ pub unsafe fn vmlal_high_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] #[doc = "## Safety"] @@ -24263,7 +23225,6 @@ pub unsafe fn vmlal_high_lane_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] #[doc = "## Safety"] @@ -24290,7 +23251,6 @@ pub unsafe fn vmlal_high_lane_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] #[doc = "## Safety"] @@ -24313,7 +23273,6 @@ pub unsafe fn vmlal_high_laneq_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] #[doc = "## Safety"] @@ -24340,7 +23299,6 @@ pub unsafe fn vmlal_high_laneq_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] #[doc = "## Safety"] @@ -24376,7 +23334,6 @@ pub unsafe fn vmlal_high_lane_u16( ), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] #[doc = "## Safety"] @@ -24416,7 +23373,6 @@ pub unsafe fn vmlal_high_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] #[doc = "## Safety"] @@ -24452,7 +23408,6 @@ pub unsafe fn vmlal_high_laneq_u16( ), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] #[doc = "## Safety"] @@ -24492,7 +23447,6 @@ pub unsafe fn vmlal_high_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] #[doc = "## Safety"] @@ -24515,7 +23469,6 @@ pub unsafe fn vmlal_high_lane_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] #[doc = "## Safety"] @@ -24542,7 +23495,6 @@ pub unsafe fn vmlal_high_lane_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] #[doc = "## Safety"] @@ -24565,7 +23517,6 @@ pub unsafe fn vmlal_high_laneq_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] #[doc = "## Safety"] @@ -24592,7 +23543,6 @@ pub unsafe fn vmlal_high_laneq_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] #[doc = "## Safety"] @@ -24605,7 +23555,6 @@ pub unsafe fn vmlal_high_laneq_u32( pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vmlal_high_s16(a, b, vdupq_n_s16(c)) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] #[doc = "## Safety"] @@ -24621,7 +23570,6 @@ pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t let ret_val: int32x4_t = vmlal_high_s16(a, b, vdupq_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] #[doc = "## Safety"] @@ -24634,7 +23582,6 @@ pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vmlal_high_s32(a, b, vdupq_n_s32(c)) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] #[doc = "## Safety"] @@ -24650,7 +23597,6 @@ pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t let ret_val: int64x2_t = vmlal_high_s32(a, b, vdupq_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] #[doc = "## Safety"] @@ -24663,7 +23609,6 @@ pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { vmlal_high_u16(a, b, vdupq_n_u16(c)) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] #[doc = "## Safety"] @@ -24679,7 +23624,6 @@ pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4 let ret_val: uint32x4_t = vmlal_high_u16(a, b, vdupq_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] #[doc = "## Safety"] @@ -24692,7 +23636,6 @@ pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4 pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { vmlal_high_u32(a, b, vdupq_n_u32(c)) } - #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] #[doc = "## Safety"] @@ -24708,7 +23651,6 @@ pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2 let ret_val: uint64x2_t = vmlal_high_u32(a, b, vdupq_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] #[doc = "## Safety"] @@ -24723,7 +23665,6 @@ pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); vmlal_s8(a, b, c) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] #[doc = "## Safety"] @@ -24742,7 +23683,6 @@ pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 let ret_val: int16x8_t = vmlal_s8(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] #[doc = "## Safety"] @@ -24757,7 +23697,6 @@ pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); vmlal_s16(a, b, c) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] #[doc = "## Safety"] @@ -24776,7 +23715,6 @@ pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x let ret_val: int32x4_t = vmlal_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] #[doc = "## Safety"] @@ -24791,7 +23729,6 @@ pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); vmlal_s32(a, b, c) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] #[doc = "## Safety"] @@ -24810,7 +23747,6 @@ pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let ret_val: int64x2_t = vmlal_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] #[doc = "## Safety"] @@ -24825,7 +23761,6 @@ pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); vmlal_u8(a, b, c) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] #[doc = "## Safety"] @@ -24844,7 +23779,6 @@ pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint let ret_val: uint16x8_t = vmlal_u8(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] #[doc = "## Safety"] @@ -24859,7 +23793,6 @@ pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); vmlal_u16(a, b, c) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] #[doc = "## Safety"] @@ -24878,7 +23811,6 @@ pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin let ret_val: uint32x4_t = vmlal_u16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] #[doc = "## Safety"] @@ -24893,7 +23825,6 @@ pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); vmlal_u32(a, b, c) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] #[doc = "## Safety"] @@ -24912,7 +23843,6 @@ pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let ret_val: uint64x2_t = vmlal_u32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"] #[doc = "## Safety"] @@ -24924,7 +23854,6 @@ pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] #[doc = "## Safety"] @@ -24937,7 +23866,6 @@ pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6 pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] #[doc = "## Safety"] @@ -24954,7 +23882,6 @@ pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float let ret_val: float64x2_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] #[doc = "## Safety"] @@ -24990,7 +23917,6 @@ pub unsafe fn vmlsl_high_lane_s16( ), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] #[doc = "## Safety"] @@ -25030,7 +23956,6 @@ pub unsafe fn vmlsl_high_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] #[doc = "## Safety"] @@ -25066,7 +23991,6 @@ pub unsafe fn vmlsl_high_laneq_s16( ), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] #[doc = "## Safety"] @@ -25106,7 +24030,6 @@ pub unsafe fn vmlsl_high_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] #[doc = "## Safety"] @@ -25129,7 +24052,6 @@ pub unsafe fn vmlsl_high_lane_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] #[doc = "## Safety"] @@ -25156,7 +24078,6 @@ pub unsafe fn vmlsl_high_lane_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] #[doc = "## Safety"] @@ -25179,7 +24100,6 @@ pub unsafe fn vmlsl_high_laneq_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] #[doc = "## Safety"] @@ -25206,7 +24126,6 @@ pub unsafe fn vmlsl_high_laneq_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] #[doc = "## Safety"] @@ -25242,7 +24161,6 @@ pub unsafe fn vmlsl_high_lane_u16( ), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] #[doc = "## Safety"] @@ -25282,7 +24200,6 @@ pub unsafe fn vmlsl_high_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] #[doc = "## Safety"] @@ -25318,7 +24235,6 @@ pub unsafe fn vmlsl_high_laneq_u16( ), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] #[doc = "## Safety"] @@ -25358,7 +24274,6 @@ pub unsafe fn vmlsl_high_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] #[doc = "## Safety"] @@ -25381,7 +24296,6 @@ pub unsafe fn vmlsl_high_lane_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] #[doc = "## Safety"] @@ -25408,7 +24322,6 @@ pub unsafe fn vmlsl_high_lane_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] #[doc = "## Safety"] @@ -25431,7 +24344,6 @@ pub unsafe fn vmlsl_high_laneq_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] #[doc = "## Safety"] @@ -25458,7 +24370,6 @@ pub unsafe fn vmlsl_high_laneq_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] #[doc = "## Safety"] @@ -25471,7 +24382,6 @@ pub unsafe fn vmlsl_high_laneq_u32( pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vmlsl_high_s16(a, b, vdupq_n_s16(c)) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] #[doc = "## Safety"] @@ -25487,7 +24397,6 @@ pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t let ret_val: int32x4_t = vmlsl_high_s16(a, b, vdupq_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] #[doc = "## Safety"] @@ -25500,7 +24409,6 @@ pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vmlsl_high_s32(a, b, vdupq_n_s32(c)) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] #[doc = "## Safety"] @@ -25516,7 +24424,6 @@ pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t let ret_val: int64x2_t = vmlsl_high_s32(a, b, vdupq_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] #[doc = "## Safety"] @@ -25529,7 +24436,6 @@ pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { vmlsl_high_u16(a, b, vdupq_n_u16(c)) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] #[doc = "## Safety"] @@ -25545,7 +24451,6 @@ pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4 let ret_val: uint32x4_t = vmlsl_high_u16(a, b, vdupq_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] #[doc = "## Safety"] @@ -25558,7 +24463,6 @@ pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4 pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { vmlsl_high_u32(a, b, vdupq_n_u32(c)) } - #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] #[doc = "## Safety"] @@ -25574,7 +24478,6 @@ pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2 let ret_val: uint64x2_t = vmlsl_high_u32(a, b, vdupq_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] #[doc = "## Safety"] @@ -25589,7 +24492,6 @@ pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); vmlsl_s8(a, b, c) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] #[doc = "## Safety"] @@ -25608,7 +24510,6 @@ pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 let ret_val: int16x8_t = vmlsl_s8(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] #[doc = "## Safety"] @@ -25623,7 +24524,6 @@ pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); vmlsl_s16(a, b, c) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] #[doc = "## Safety"] @@ -25642,7 +24542,6 @@ pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x let ret_val: int32x4_t = vmlsl_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] #[doc = "## Safety"] @@ -25657,7 +24556,6 @@ pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); vmlsl_s32(a, b, c) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] #[doc = "## Safety"] @@ -25676,7 +24574,6 @@ pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let ret_val: int64x2_t = vmlsl_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] #[doc = "## Safety"] @@ -25691,7 +24588,6 @@ pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); vmlsl_u8(a, b, c) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] #[doc = "## Safety"] @@ -25710,7 +24606,6 @@ pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint let ret_val: uint16x8_t = vmlsl_u8(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] #[doc = "## Safety"] @@ -25725,7 +24620,6 @@ pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); vmlsl_u16(a, b, c) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] #[doc = "## Safety"] @@ -25744,7 +24638,6 @@ pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin let ret_val: uint32x4_t = vmlsl_u16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] #[doc = "## Safety"] @@ -25759,7 +24652,6 @@ pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); vmlsl_u32(a, b, c) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] #[doc = "## Safety"] @@ -25778,7 +24670,6 @@ pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let ret_val: uint64x2_t = vmlsl_u32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] #[doc = "## Safety"] @@ -25792,7 +24683,6 @@ pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vmovl_s8(a) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] #[doc = "## Safety"] @@ -25808,7 +24698,6 @@ pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = vmovl_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] #[doc = "## Safety"] @@ -25822,7 +24711,6 @@ pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); vmovl_s16(a) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] #[doc = "## Safety"] @@ -25838,7 +24726,6 @@ pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = vmovl_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] #[doc = "## Safety"] @@ -25852,7 +24739,6 @@ pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); vmovl_s32(a) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] #[doc = "## Safety"] @@ -25868,7 +24754,6 @@ pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = vmovl_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] #[doc = "## Safety"] @@ -25882,7 +24767,6 @@ pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vmovl_u8(a) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] #[doc = "## Safety"] @@ -25898,7 +24782,6 @@ pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = vmovl_u8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] #[doc = "## Safety"] @@ -25912,7 +24795,6 @@ pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); vmovl_u16(a) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] #[doc = "## Safety"] @@ -25928,7 +24810,6 @@ pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = vmovl_u16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] #[doc = "## Safety"] @@ -25942,7 +24823,6 @@ pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); vmovl_u32(a) } - #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] #[doc = "## Safety"] @@ -25958,7 +24838,6 @@ pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = vmovl_u32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] #[doc = "## Safety"] @@ -25972,7 +24851,6 @@ pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { let c: int8x8_t = simd_cast(b); simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] #[doc = "## Safety"] @@ -25994,7 +24872,6 @@ pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] #[doc = "## Safety"] @@ -26008,7 +24885,6 @@ pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { let c: int16x4_t = simd_cast(b); simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] #[doc = "## Safety"] @@ -26025,7 +24901,6 @@ pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] #[doc = "## Safety"] @@ -26039,7 +24914,6 @@ pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { let c: int32x2_t = simd_cast(b); simd_shuffle!(a, c, [0, 1, 2, 3]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] #[doc = "## Safety"] @@ -26056,7 +24930,6 @@ pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, c, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] #[doc = "## Safety"] @@ -26070,7 +24943,6 @@ pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { let c: uint8x8_t = simd_cast(b); simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] #[doc = "## Safety"] @@ -26092,7 +24964,6 @@ pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] #[doc = "## Safety"] @@ -26106,7 +24977,6 @@ pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { let c: uint16x4_t = simd_cast(b); simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] #[doc = "## Safety"] @@ -26123,7 +24993,6 @@ pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] #[doc = "## Safety"] @@ -26137,7 +25006,6 @@ pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { let c: uint32x2_t = simd_cast(b); simd_shuffle!(a, c, [0, 1, 2, 3]) } - #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] #[doc = "## Safety"] @@ -26154,7 +25022,6 @@ pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, c, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"] #[doc = "## Safety"] @@ -26166,7 +25033,6 @@ pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] #[doc = "## Safety"] @@ -26179,7 +25045,6 @@ pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] #[doc = "## Safety"] @@ -26195,7 +25060,6 @@ pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"] #[doc = "## Safety"] @@ -26209,7 +25073,6 @@ pub unsafe fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> static_assert!(LANE == 0); simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] #[doc = "## Safety"] @@ -26224,7 +25087,6 @@ pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> static_assert_uimm_bits!(LANE, 1); simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] #[doc = "## Safety"] @@ -26240,7 +25102,6 @@ pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"] #[doc = "## Safety"] @@ -26252,7 +25113,6 @@ pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { simd_mul(a, vdup_n_f64(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] #[doc = "## Safety"] @@ -26265,7 +25125,6 @@ pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { simd_mul(a, vdupq_n_f64(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] #[doc = "## Safety"] @@ -26280,7 +25139,6 @@ pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { let ret_val: float64x2_t = simd_mul(a, vdupq_n_f64(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"] #[doc = "## Safety"] @@ -26295,7 +25153,6 @@ pub unsafe fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { let b: f64 = simd_extract!(b, LANE as u32); a * b } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] #[doc = "## Safety"] @@ -26326,7 +25183,6 @@ pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) - ), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] #[doc = "## Safety"] @@ -26360,7 +25216,6 @@ pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) - ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] #[doc = "## Safety"] @@ -26391,7 +25246,6 @@ pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) ), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] #[doc = "## Safety"] @@ -26425,7 +25279,6 @@ pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] #[doc = "## Safety"] @@ -26443,7 +25296,6 @@ pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] #[doc = "## Safety"] @@ -26464,7 +25316,6 @@ pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) - ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] #[doc = "## Safety"] @@ -26482,7 +25333,6 @@ pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] #[doc = "## Safety"] @@ -26503,7 +25353,6 @@ pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] #[doc = "## Safety"] @@ -26534,7 +25383,6 @@ pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) ), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] #[doc = "## Safety"] @@ -26568,7 +25416,6 @@ pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] #[doc = "## Safety"] @@ -26599,7 +25446,6 @@ pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t ), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] #[doc = "## Safety"] @@ -26633,7 +25479,6 @@ pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] #[doc = "## Safety"] @@ -26651,7 +25496,6 @@ pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] #[doc = "## Safety"] @@ -26672,7 +25516,6 @@ pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] #[doc = "## Safety"] @@ -26690,7 +25533,6 @@ pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] #[doc = "## Safety"] @@ -26711,7 +25553,6 @@ pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t ); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] #[doc = "## Safety"] @@ -26724,7 +25565,6 @@ pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { vmull_high_s16(a, vdupq_n_s16(b)) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] #[doc = "## Safety"] @@ -26739,7 +25579,6 @@ pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { let ret_val: int32x4_t = vmull_high_s16(a, vdupq_n_s16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] #[doc = "## Safety"] @@ -26752,7 +25591,6 @@ pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { vmull_high_s32(a, vdupq_n_s32(b)) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] #[doc = "## Safety"] @@ -26767,7 +25605,6 @@ pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { let ret_val: int64x2_t = vmull_high_s32(a, vdupq_n_s32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] #[doc = "## Safety"] @@ -26780,7 +25617,6 @@ pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { vmull_high_u16(a, vdupq_n_u16(b)) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] #[doc = "## Safety"] @@ -26795,7 +25631,6 @@ pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { let ret_val: uint32x4_t = vmull_high_u16(a, vdupq_n_u16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] #[doc = "## Safety"] @@ -26808,7 +25643,6 @@ pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { vmull_high_u32(a, vdupq_n_u32(b)) } - #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] #[doc = "## Safety"] @@ -26823,7 +25657,6 @@ pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { let ret_val: uint64x2_t = vmull_high_u32(a, vdupq_n_u32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] #[doc = "## Safety"] @@ -26836,7 +25669,6 @@ pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) } - #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] #[doc = "## Safety"] @@ -26851,7 +25683,6 @@ pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) } - #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] #[doc = "## Safety"] @@ -26866,7 +25697,6 @@ pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); vmull_p8(a, b) } - #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] #[doc = "## Safety"] @@ -26884,7 +25714,6 @@ pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { let ret_val: poly16x8_t = vmull_p8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] #[doc = "## Safety"] @@ -26899,7 +25728,6 @@ pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); vmull_s8(a, b) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] #[doc = "## Safety"] @@ -26917,7 +25745,6 @@ pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = vmull_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] #[doc = "## Safety"] @@ -26932,7 +25759,6 @@ pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); vmull_s16(a, b) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] #[doc = "## Safety"] @@ -26950,7 +25776,6 @@ pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = vmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] #[doc = "## Safety"] @@ -26965,7 +25790,6 @@ pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); vmull_s32(a, b) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] #[doc = "## Safety"] @@ -26983,7 +25807,6 @@ pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = vmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] #[doc = "## Safety"] @@ -26998,7 +25821,6 @@ pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); vmull_u8(a, b) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] #[doc = "## Safety"] @@ -27016,7 +25838,6 @@ pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = vmull_u8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] #[doc = "## Safety"] @@ -27031,7 +25852,6 @@ pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); vmull_u16(a, b) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] #[doc = "## Safety"] @@ -27049,7 +25869,6 @@ pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = vmull_u16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] #[doc = "## Safety"] @@ -27064,7 +25883,6 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); vmull_u32(a, b) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] #[doc = "## Safety"] @@ -27082,7 +25900,6 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = vmull_u32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"] #[doc = "## Safety"] @@ -27092,7 +25909,7 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(pmull))] pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.pmull64" @@ -27101,7 +25918,6 @@ pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { } transmute(_vmull_p64(a, b)) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] #[doc = "## Safety"] @@ -27116,7 +25932,6 @@ pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> static_assert!(LANE == 0); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] #[doc = "## Safety"] @@ -27133,7 +25948,6 @@ pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> let ret_val: float64x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] #[doc = "## Safety"] @@ -27148,7 +25962,6 @@ pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) - static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] #[doc = "## Safety"] @@ -27166,7 +25979,6 @@ pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) - let ret_val: float64x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] #[doc = "## Safety"] @@ -27182,7 +25994,6 @@ pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { let b: f32 = simd_extract!(b, LANE as u32); a * b } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] #[doc = "## Safety"] @@ -27199,7 +26010,6 @@ pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { let b: f32 = simd_extract!(b, LANE as u32); a * b } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] #[doc = "## Safety"] @@ -27215,7 +26025,6 @@ pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { let b: f32 = simd_extract!(b, LANE as u32); a * b } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] #[doc = "## Safety"] @@ -27232,7 +26041,6 @@ pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { let b: f32 = simd_extract!(b, LANE as u32); a * b } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] #[doc = "## Safety"] @@ -27248,7 +26056,6 @@ pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { let b: f64 = simd_extract!(b, LANE as u32); a * b } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] #[doc = "## Safety"] @@ -27265,7 +26072,6 @@ pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { let b: f64 = simd_extract!(b, LANE as u32); a * b } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] #[doc = "## Safety"] @@ -27276,7 +26082,7 @@ pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f32" @@ -27285,7 +26091,6 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vmulx_f32(a, b) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] #[doc = "## Safety"] @@ -27296,7 +26101,7 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f32" @@ -27308,7 +26113,6 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vmulx_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] #[doc = "## Safety"] @@ -27319,7 +26123,7 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v4f32" @@ -27328,7 +26132,6 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vmulxq_f32(a, b) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] #[doc = "## Safety"] @@ -27339,7 +26142,7 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v4f32" @@ -27351,7 +26154,6 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vmulxq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"] #[doc = "## Safety"] @@ -27361,7 +26163,7 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v1f64" @@ -27370,7 +26172,6 @@ pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vmulx_f64(a, b) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] #[doc = "## Safety"] @@ -27381,7 +26182,7 @@ pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f64" @@ -27390,7 +26191,6 @@ pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vmulxq_f64(a, b) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] #[doc = "## Safety"] @@ -27401,7 +26201,7 @@ pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f64" @@ -27413,7 +26213,6 @@ pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vmulxq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] #[doc = "## Safety"] @@ -27428,7 +26227,6 @@ pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> static_assert_uimm_bits!(LANE, 1); vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] #[doc = "## Safety"] @@ -27446,7 +26244,6 @@ pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> let ret_val: float32x2_t = vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] #[doc = "## Safety"] @@ -27461,7 +26258,6 @@ pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) - static_assert_uimm_bits!(LANE, 2); vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] #[doc = "## Safety"] @@ -27479,7 +26275,6 @@ pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) - let ret_val: float32x2_t = vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] #[doc = "## Safety"] @@ -27497,7 +26292,6 @@ pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] #[doc = "## Safety"] @@ -27518,7 +26312,6 @@ pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) - ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] #[doc = "## Safety"] @@ -27536,7 +26329,6 @@ pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] #[doc = "## Safety"] @@ -27557,7 +26349,6 @@ pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] #[doc = "## Safety"] @@ -27572,7 +26363,6 @@ pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) static_assert_uimm_bits!(LANE, 1); vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] #[doc = "## Safety"] @@ -27590,7 +26380,6 @@ pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) let ret_val: float64x2_t = vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"] #[doc = "## Safety"] @@ -27604,7 +26393,6 @@ pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> static_assert!(LANE == 0); vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] #[doc = "## Safety"] @@ -27619,7 +26407,6 @@ pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) - static_assert_uimm_bits!(LANE, 1); vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] #[doc = "## Safety"] @@ -27635,7 +26422,6 @@ pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"] #[doc = "## Safety"] @@ -27645,7 +26431,7 @@ pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.f64" @@ -27654,7 +26440,6 @@ pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { } _vmulxd_f64(a, b) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"] #[doc = "## Safety"] @@ -27664,7 +26449,7 @@ pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.f32" @@ -27673,7 +26458,6 @@ pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { } _vmulxs_f32(a, b) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"] #[doc = "## Safety"] @@ -27687,7 +26471,6 @@ pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { static_assert!(LANE == 0); vmulxd_f64(a, simd_extract!(b, LANE as u32)) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] #[doc = "## Safety"] @@ -27702,7 +26485,6 @@ pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { static_assert_uimm_bits!(LANE, 1); vmulxd_f64(a, simd_extract!(b, LANE as u32)) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] #[doc = "## Safety"] @@ -27718,7 +26500,6 @@ pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); vmulxd_f64(a, simd_extract!(b, LANE as u32)) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] #[doc = "## Safety"] @@ -27733,7 +26514,6 @@ pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { static_assert_uimm_bits!(LANE, 1); vmulxs_f32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] #[doc = "## Safety"] @@ -27749,7 +26529,6 @@ pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); vmulxs_f32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] #[doc = "## Safety"] @@ -27764,7 +26543,6 @@ pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { static_assert_uimm_bits!(LANE, 2); vmulxs_f32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] #[doc = "## Safety"] @@ -27780,7 +26558,6 @@ pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); vmulxs_f32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] #[doc = "## Safety"] @@ -27795,7 +26572,6 @@ pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) - static_assert!(LANE == 0); vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] #[doc = "## Safety"] @@ -27812,7 +26588,6 @@ pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) - let ret_val: float64x2_t = vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"] #[doc = "## Safety"] @@ -27824,7 +26599,6 @@ pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) - pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] #[doc = "## Safety"] @@ -27837,7 +26611,6 @@ pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] #[doc = "## Safety"] @@ -27852,7 +26625,6 @@ pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"] #[doc = "## Safety"] @@ -27864,7 +26636,6 @@ pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] #[doc = "## Safety"] @@ -27877,7 +26648,6 @@ pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] #[doc = "## Safety"] @@ -27892,7 +26662,6 @@ pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"] #[doc = "## Safety"] @@ -27904,7 +26673,6 @@ pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { pub unsafe fn vnegd_s64(a: i64) -> i64 { a.wrapping_neg() } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] #[doc = "## Safety"] @@ -27919,7 +26687,6 @@ pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { let a2: f64 = simd_extract!(a, 1); a1 + a2 } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] #[doc = "## Safety"] @@ -27935,7 +26702,6 @@ pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { let a2: f64 = simd_extract!(a, 1); a1 + a2 } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] #[doc = "## Safety"] @@ -27950,7 +26716,6 @@ pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { let a2: f32 = simd_extract!(a, 1); a1 + a2 } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] #[doc = "## Safety"] @@ -27966,7 +26731,6 @@ pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { let a2: f32 = simd_extract!(a, 1); a1 + a2 } - #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] #[doc = "## Safety"] @@ -27979,7 +26743,6 @@ pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { transmute(vaddvq_u64(transmute(a))) } - #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] #[doc = "## Safety"] @@ -27993,7 +26756,6 @@ pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); transmute(vaddvq_u64(transmute(a))) } - #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] #[doc = "## Safety"] @@ -28006,7 +26768,6 @@ pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { vaddvq_u64(a) } - #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] #[doc = "## Safety"] @@ -28020,7 +26781,6 @@ pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); vaddvq_u64(a) } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] #[doc = "## Safety"] @@ -28031,7 +26791,7 @@ pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v4f32" @@ -28040,7 +26800,6 @@ pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vpaddq_f32(a, b) } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] #[doc = "## Safety"] @@ -28051,7 +26810,7 @@ pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v4f32" @@ -28063,7 +26822,6 @@ pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vpaddq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] #[doc = "## Safety"] @@ -28074,7 +26832,7 @@ pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v2f64" @@ -28083,7 +26841,6 @@ pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vpaddq_f64(a, b) } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] #[doc = "## Safety"] @@ -28094,7 +26851,7 @@ pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v2f64" @@ -28106,7 +26863,6 @@ pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vpaddq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] #[doc = "## Safety"] @@ -28117,7 +26873,7 @@ pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v16i8" @@ -28126,7 +26882,6 @@ pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vpaddq_s8(a, b) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] #[doc = "## Safety"] @@ -28137,7 +26892,7 @@ pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v16i8" @@ -28153,7 +26908,6 @@ pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] #[doc = "## Safety"] @@ -28164,7 +26918,7 @@ pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v8i16" @@ -28173,7 +26927,6 @@ pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vpaddq_s16(a, b) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] #[doc = "## Safety"] @@ -28184,7 +26937,7 @@ pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v8i16" @@ -28196,7 +26949,6 @@ pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vpaddq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] #[doc = "## Safety"] @@ -28207,7 +26959,7 @@ pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v4i32" @@ -28216,7 +26968,6 @@ pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vpaddq_s32(a, b) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] #[doc = "## Safety"] @@ -28227,7 +26978,7 @@ pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v4i32" @@ -28239,7 +26990,6 @@ pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vpaddq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] #[doc = "## Safety"] @@ -28250,7 +27000,7 @@ pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v2i64" @@ -28259,7 +27009,6 @@ pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } _vpaddq_s64(a, b) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] #[doc = "## Safety"] @@ -28270,7 +27019,7 @@ pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v2i64" @@ -28282,7 +27031,6 @@ pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vpaddq_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] #[doc = "## Safety"] @@ -28295,7 +27043,6 @@ pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { transmute(vpaddq_s8(transmute(a), transmute(b))) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] #[doc = "## Safety"] @@ -28315,7 +27062,6 @@ pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] #[doc = "## Safety"] @@ -28328,7 +27074,6 @@ pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { transmute(vpaddq_s16(transmute(a), transmute(b))) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] #[doc = "## Safety"] @@ -28344,7 +27089,6 @@ pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] #[doc = "## Safety"] @@ -28357,7 +27101,6 @@ pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { transmute(vpaddq_s32(transmute(a), transmute(b))) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] #[doc = "## Safety"] @@ -28373,7 +27116,6 @@ pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] #[doc = "## Safety"] @@ -28386,7 +27128,6 @@ pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { transmute(vpaddq_s64(transmute(a), transmute(b))) } - #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] #[doc = "## Safety"] @@ -28402,7 +27143,6 @@ pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] #[doc = "## Safety"] @@ -28413,7 +27153,7 @@ pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" @@ -28422,7 +27162,6 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vpmaxnm_f32(a, b) } - #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] #[doc = "## Safety"] @@ -28433,7 +27172,7 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" @@ -28445,7 +27184,6 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vpmaxnm_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] #[doc = "## Safety"] @@ -28456,7 +27194,7 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" @@ -28465,7 +27203,6 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vpmaxnmq_f32(a, b) } - #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] #[doc = "## Safety"] @@ -28476,7 +27213,7 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" @@ -28488,7 +27225,6 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vpmaxnmq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] #[doc = "## Safety"] @@ -28499,7 +27235,7 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" @@ -28508,7 +27244,6 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vpmaxnmq_f64(a, b) } - #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] #[doc = "## Safety"] @@ -28519,7 +27254,7 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" @@ -28531,7 +27266,6 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vpmaxnmq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] #[doc = "## Safety"] @@ -28542,7 +27276,7 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" @@ -28551,7 +27285,6 @@ pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { } _vpmaxnmqd_f64(a) } - #[doc = "Floating-point maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] #[doc = "## Safety"] @@ -28562,7 +27295,7 @@ pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" @@ -28572,7 +27305,6 @@ pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vpmaxnmqd_f64(a) } - #[doc = "Floating-point maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] #[doc = "## Safety"] @@ -28583,7 +27315,7 @@ pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" @@ -28592,7 +27324,6 @@ pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { } _vpmaxnms_f32(a) } - #[doc = "Floating-point maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] #[doc = "## Safety"] @@ -28603,7 +27334,7 @@ pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" @@ -28613,7 +27344,6 @@ pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vpmaxnms_f32(a) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] #[doc = "## Safety"] @@ -28624,7 +27354,7 @@ pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxp.v4f32" @@ -28633,7 +27363,6 @@ pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vpmaxq_f32(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] #[doc = "## Safety"] @@ -28644,7 +27373,7 @@ pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxp.v4f32" @@ -28656,7 +27385,6 @@ pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vpmaxq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] #[doc = "## Safety"] @@ -28667,7 +27395,7 @@ pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxp.v2f64" @@ -28676,7 +27404,6 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vpmaxq_f64(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] #[doc = "## Safety"] @@ -28687,7 +27414,7 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxp.v2f64" @@ -28699,7 +27426,6 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vpmaxq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] #[doc = "## Safety"] @@ -28710,7 +27436,7 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v16i8" @@ -28719,7 +27445,6 @@ pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vpmaxq_s8(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] #[doc = "## Safety"] @@ -28730,7 +27455,7 @@ pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v16i8" @@ -28746,7 +27471,6 @@ pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] #[doc = "## Safety"] @@ -28757,7 +27481,7 @@ pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v8i16" @@ -28766,7 +27490,6 @@ pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vpmaxq_s16(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] #[doc = "## Safety"] @@ -28777,7 +27500,7 @@ pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v8i16" @@ -28789,7 +27512,6 @@ pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vpmaxq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] #[doc = "## Safety"] @@ -28800,7 +27522,7 @@ pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v4i32" @@ -28809,7 +27531,6 @@ pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vpmaxq_s32(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] #[doc = "## Safety"] @@ -28820,7 +27541,7 @@ pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v4i32" @@ -28832,7 +27553,6 @@ pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vpmaxq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] #[doc = "## Safety"] @@ -28843,7 +27563,7 @@ pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v16i8" @@ -28852,7 +27572,6 @@ pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vpmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] #[doc = "## Safety"] @@ -28863,7 +27582,7 @@ pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v16i8" @@ -28879,7 +27598,6 @@ pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] #[doc = "## Safety"] @@ -28890,7 +27608,7 @@ pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v8i16" @@ -28899,7 +27617,6 @@ pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] #[doc = "## Safety"] @@ -28910,7 +27627,7 @@ pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v8i16" @@ -28922,7 +27639,6 @@ pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] #[doc = "## Safety"] @@ -28933,7 +27649,7 @@ pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v4i32" @@ -28942,7 +27658,6 @@ pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] #[doc = "## Safety"] @@ -28953,7 +27668,7 @@ pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v4i32" @@ -28965,7 +27680,6 @@ pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] #[doc = "## Safety"] @@ -28976,7 +27690,7 @@ pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" @@ -28985,7 +27699,6 @@ pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { } _vpmaxqd_f64(a) } - #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] #[doc = "## Safety"] @@ -28996,7 +27709,7 @@ pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" @@ -29006,7 +27719,6 @@ pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vpmaxqd_f64(a) } - #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] #[doc = "## Safety"] @@ -29017,7 +27729,7 @@ pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" @@ -29026,7 +27738,6 @@ pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { } _vpmaxs_f32(a) } - #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] #[doc = "## Safety"] @@ -29037,7 +27748,7 @@ pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" @@ -29047,7 +27758,6 @@ pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vpmaxs_f32(a) } - #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] #[doc = "## Safety"] @@ -29058,7 +27768,7 @@ pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f32" @@ -29067,7 +27777,6 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vpminnm_f32(a, b) } - #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] #[doc = "## Safety"] @@ -29078,7 +27787,7 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f32" @@ -29090,7 +27799,6 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vpminnm_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] #[doc = "## Safety"] @@ -29101,7 +27809,7 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v4f32" @@ -29110,7 +27818,6 @@ pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vpminnmq_f32(a, b) } - #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] #[doc = "## Safety"] @@ -29121,7 +27828,7 @@ pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v4f32" @@ -29133,7 +27840,6 @@ pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vpminnmq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] #[doc = "## Safety"] @@ -29144,7 +27850,7 @@ pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f64" @@ -29153,7 +27859,6 @@ pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vpminnmq_f64(a, b) } - #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] #[doc = "## Safety"] @@ -29164,7 +27869,7 @@ pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f64" @@ -29176,7 +27881,6 @@ pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vpminnmq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] #[doc = "## Safety"] @@ -29187,7 +27891,7 @@ pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" @@ -29196,7 +27900,6 @@ pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { } _vpminnmqd_f64(a) } - #[doc = "Floating-point minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] #[doc = "## Safety"] @@ -29207,7 +27910,7 @@ pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" @@ -29217,7 +27920,6 @@ pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vpminnmqd_f64(a) } - #[doc = "Floating-point minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] #[doc = "## Safety"] @@ -29228,7 +27930,7 @@ pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" @@ -29237,7 +27939,6 @@ pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { } _vpminnms_f32(a) } - #[doc = "Floating-point minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] #[doc = "## Safety"] @@ -29248,7 +27949,7 @@ pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" @@ -29258,7 +27959,6 @@ pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vpminnms_f32(a) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] #[doc = "## Safety"] @@ -29269,7 +27969,7 @@ pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminp.v4f32" @@ -29278,7 +27978,6 @@ pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vpminq_f32(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] #[doc = "## Safety"] @@ -29289,7 +27988,7 @@ pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminp.v4f32" @@ -29301,7 +28000,6 @@ pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vpminq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] #[doc = "## Safety"] @@ -29312,7 +28010,7 @@ pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminp.v2f64" @@ -29321,7 +28019,6 @@ pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vpminq_f64(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] #[doc = "## Safety"] @@ -29332,7 +28029,7 @@ pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminp.v2f64" @@ -29344,7 +28041,6 @@ pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vpminq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] #[doc = "## Safety"] @@ -29355,7 +28051,7 @@ pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v16i8" @@ -29364,7 +28060,6 @@ pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vpminq_s8(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] #[doc = "## Safety"] @@ -29375,7 +28070,7 @@ pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v16i8" @@ -29391,7 +28086,6 @@ pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] #[doc = "## Safety"] @@ -29402,7 +28096,7 @@ pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v8i16" @@ -29411,7 +28105,6 @@ pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vpminq_s16(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] #[doc = "## Safety"] @@ -29422,7 +28115,7 @@ pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v8i16" @@ -29434,7 +28127,6 @@ pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vpminq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] #[doc = "## Safety"] @@ -29445,7 +28137,7 @@ pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v4i32" @@ -29454,7 +28146,6 @@ pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vpminq_s32(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] #[doc = "## Safety"] @@ -29465,7 +28156,7 @@ pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v4i32" @@ -29477,7 +28168,6 @@ pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vpminq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] #[doc = "## Safety"] @@ -29488,7 +28178,7 @@ pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v16i8" @@ -29497,7 +28187,6 @@ pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vpminq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] #[doc = "## Safety"] @@ -29508,7 +28197,7 @@ pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v16i8" @@ -29524,7 +28213,6 @@ pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] #[doc = "## Safety"] @@ -29535,7 +28223,7 @@ pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v8i16" @@ -29544,7 +28232,6 @@ pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] #[doc = "## Safety"] @@ -29555,7 +28242,7 @@ pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v8i16" @@ -29567,7 +28254,6 @@ pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] #[doc = "## Safety"] @@ -29578,7 +28264,7 @@ pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v4i32" @@ -29587,7 +28273,6 @@ pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] #[doc = "## Safety"] @@ -29598,7 +28283,7 @@ pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v4i32" @@ -29610,7 +28295,6 @@ pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] #[doc = "## Safety"] @@ -29621,7 +28305,7 @@ pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f64.v2f64" @@ -29630,7 +28314,6 @@ pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { } _vpminqd_f64(a) } - #[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] #[doc = "## Safety"] @@ -29641,7 +28324,7 @@ pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f64.v2f64" @@ -29651,7 +28334,6 @@ pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vpminqd_f64(a) } - #[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] #[doc = "## Safety"] @@ -29662,7 +28344,7 @@ pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v2f32" @@ -29671,7 +28353,6 @@ pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { } _vpmins_f32(a) } - #[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] #[doc = "## Safety"] @@ -29682,7 +28363,7 @@ pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v2f32" @@ -29692,7 +28373,6 @@ pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); _vpmins_f32(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"] #[doc = "## Safety"] @@ -29702,7 +28382,7 @@ pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v1i64" @@ -29711,7 +28391,6 @@ pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { } _vqabs_s64(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] #[doc = "## Safety"] @@ -29722,7 +28401,7 @@ pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v2i64" @@ -29731,7 +28410,6 @@ pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { } _vqabsq_s64(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] #[doc = "## Safety"] @@ -29742,7 +28420,7 @@ pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v2i64" @@ -29753,7 +28431,6 @@ pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vqabsq_s64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"] #[doc = "## Safety"] @@ -29765,7 +28442,6 @@ pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { pub unsafe fn vqabsb_s8(a: i8) -> i8 { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) } - #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"] #[doc = "## Safety"] @@ -29777,7 +28453,6 @@ pub unsafe fn vqabsb_s8(a: i8) -> i8 { pub unsafe fn vqabsh_s16(a: i16) -> i16 { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) } - #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"] #[doc = "## Safety"] @@ -29787,7 +28462,7 @@ pub unsafe fn vqabsh_s16(a: i16) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabss_s32(a: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.i32" @@ -29796,7 +28471,6 @@ pub unsafe fn vqabss_s32(a: i32) -> i32 { } _vqabss_s32(a) } - #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"] #[doc = "## Safety"] @@ -29806,7 +28480,7 @@ pub unsafe fn vqabss_s32(a: i32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] pub unsafe fn vqabsd_s64(a: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.i64" @@ -29815,7 +28489,6 @@ pub unsafe fn vqabsd_s64(a: i64) -> i64 { } _vqabsd_s64(a) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"] #[doc = "## Safety"] @@ -29829,7 +28502,6 @@ pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 { let b: int8x8_t = vdup_n_s8(b); simd_extract!(vqadd_s8(a, b), 0) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"] #[doc = "## Safety"] @@ -29843,7 +28515,6 @@ pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 { let b: int16x4_t = vdup_n_s16(b); simd_extract!(vqadd_s16(a, b), 0) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"] #[doc = "## Safety"] @@ -29857,7 +28528,6 @@ pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 { let b: uint8x8_t = vdup_n_u8(b); simd_extract!(vqadd_u8(a, b), 0) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"] #[doc = "## Safety"] @@ -29871,7 +28541,6 @@ pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { let b: uint16x4_t = vdup_n_u16(b); simd_extract!(vqadd_u16(a, b), 0) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"] #[doc = "## Safety"] @@ -29881,7 +28550,7 @@ pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.i32" @@ -29890,7 +28559,6 @@ pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { } _vqadds_s32(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"] #[doc = "## Safety"] @@ -29900,7 +28568,7 @@ pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.i64" @@ -29909,7 +28577,6 @@ pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { } _vqaddd_s64(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"] #[doc = "## Safety"] @@ -29919,7 +28586,7 @@ pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i32" @@ -29928,7 +28595,6 @@ pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { } _vqadds_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"] #[doc = "## Safety"] @@ -29938,7 +28604,7 @@ pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i64" @@ -29947,7 +28613,6 @@ pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { } _vqaddd_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] #[doc = "## Safety"] @@ -29966,7 +28631,6 @@ pub unsafe fn vqdmlal_high_lane_s16( static_assert_uimm_bits!(N, 2); vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] #[doc = "## Safety"] @@ -29989,7 +28653,6 @@ pub unsafe fn vqdmlal_high_lane_s16( let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] #[doc = "## Safety"] @@ -30008,7 +28671,6 @@ pub unsafe fn vqdmlal_high_laneq_s16( static_assert_uimm_bits!(N, 3); vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] #[doc = "## Safety"] @@ -30031,7 +28693,6 @@ pub unsafe fn vqdmlal_high_laneq_s16( let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] #[doc = "## Safety"] @@ -30050,7 +28711,6 @@ pub unsafe fn vqdmlal_high_lane_s32( static_assert_uimm_bits!(N, 1); vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] #[doc = "## Safety"] @@ -30073,7 +28733,6 @@ pub unsafe fn vqdmlal_high_lane_s32( let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] #[doc = "## Safety"] @@ -30092,7 +28751,6 @@ pub unsafe fn vqdmlal_high_laneq_s32( static_assert_uimm_bits!(N, 2); vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] #[doc = "## Safety"] @@ -30115,7 +28773,6 @@ pub unsafe fn vqdmlal_high_laneq_s32( let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] #[doc = "## Safety"] @@ -30128,7 +28785,6 @@ pub unsafe fn vqdmlal_high_laneq_s32( pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vqaddq_s32(a, vqdmull_high_n_s16(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] #[doc = "## Safety"] @@ -30144,7 +28800,6 @@ pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_ let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_n_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] #[doc = "## Safety"] @@ -30157,7 +28812,6 @@ pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_ pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { vqaddq_s32(a, vqdmull_high_s16(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] #[doc = "## Safety"] @@ -30174,7 +28828,6 @@ pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3 let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] #[doc = "## Safety"] @@ -30187,7 +28840,6 @@ pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3 pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vqaddq_s64(a, vqdmull_high_n_s32(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] #[doc = "## Safety"] @@ -30203,7 +28855,6 @@ pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_n_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] #[doc = "## Safety"] @@ -30216,7 +28867,6 @@ pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { vqaddq_s64(a, vqdmull_high_s32(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] #[doc = "## Safety"] @@ -30233,7 +28883,6 @@ pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int6 let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] #[doc = "## Safety"] @@ -30252,7 +28901,6 @@ pub unsafe fn vqdmlal_laneq_s16( static_assert_uimm_bits!(N, 3); vqaddq_s32(a, vqdmull_laneq_s16::(b, c)) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] #[doc = "## Safety"] @@ -30275,7 +28923,6 @@ pub unsafe fn vqdmlal_laneq_s16( let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_laneq_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] #[doc = "## Safety"] @@ -30294,7 +28941,6 @@ pub unsafe fn vqdmlal_laneq_s32( static_assert_uimm_bits!(N, 2); vqaddq_s64(a, vqdmull_laneq_s32::(b, c)) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] #[doc = "## Safety"] @@ -30317,7 +28963,6 @@ pub unsafe fn vqdmlal_laneq_s32( let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_laneq_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] #[doc = "## Safety"] @@ -30332,7 +28977,6 @@ pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) - static_assert_uimm_bits!(LANE, 2); vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] #[doc = "## Safety"] @@ -30348,7 +28992,6 @@ pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] #[doc = "## Safety"] @@ -30363,7 +29006,6 @@ pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) static_assert_uimm_bits!(LANE, 3); vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] #[doc = "## Safety"] @@ -30379,7 +29021,6 @@ pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] #[doc = "## Safety"] @@ -30394,7 +29035,6 @@ pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) - static_assert_uimm_bits!(LANE, 1); vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] #[doc = "## Safety"] @@ -30410,7 +29050,6 @@ pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] #[doc = "## Safety"] @@ -30425,7 +29064,6 @@ pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) static_assert_uimm_bits!(LANE, 2); vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] #[doc = "## Safety"] @@ -30441,7 +29079,6 @@ pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"] #[doc = "## Safety"] @@ -30454,7 +29091,6 @@ pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); vqadds_s32(a, simd_extract!(x, 0)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"] #[doc = "## Safety"] @@ -30467,7 +29103,6 @@ pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c)); x as i64 } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] #[doc = "## Safety"] @@ -30486,7 +29121,6 @@ pub unsafe fn vqdmlsl_high_lane_s16( static_assert_uimm_bits!(N, 2); vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] #[doc = "## Safety"] @@ -30509,7 +29143,6 @@ pub unsafe fn vqdmlsl_high_lane_s16( let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] #[doc = "## Safety"] @@ -30528,7 +29161,6 @@ pub unsafe fn vqdmlsl_high_laneq_s16( static_assert_uimm_bits!(N, 3); vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] #[doc = "## Safety"] @@ -30551,7 +29183,6 @@ pub unsafe fn vqdmlsl_high_laneq_s16( let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] #[doc = "## Safety"] @@ -30570,7 +29201,6 @@ pub unsafe fn vqdmlsl_high_lane_s32( static_assert_uimm_bits!(N, 1); vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] #[doc = "## Safety"] @@ -30593,7 +29223,6 @@ pub unsafe fn vqdmlsl_high_lane_s32( let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] #[doc = "## Safety"] @@ -30612,7 +29241,6 @@ pub unsafe fn vqdmlsl_high_laneq_s32( static_assert_uimm_bits!(N, 2); vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] #[doc = "## Safety"] @@ -30635,7 +29263,6 @@ pub unsafe fn vqdmlsl_high_laneq_s32( let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] #[doc = "## Safety"] @@ -30648,7 +29275,6 @@ pub unsafe fn vqdmlsl_high_laneq_s32( pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { vqsubq_s32(a, vqdmull_high_n_s16(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] #[doc = "## Safety"] @@ -30664,7 +29290,6 @@ pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_ let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_n_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] #[doc = "## Safety"] @@ -30677,7 +29302,6 @@ pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_ pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { vqsubq_s32(a, vqdmull_high_s16(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] #[doc = "## Safety"] @@ -30694,7 +29318,6 @@ pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3 let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] #[doc = "## Safety"] @@ -30707,7 +29330,6 @@ pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3 pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { vqsubq_s64(a, vqdmull_high_n_s32(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] #[doc = "## Safety"] @@ -30723,7 +29345,6 @@ pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_n_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] #[doc = "## Safety"] @@ -30736,7 +29357,6 @@ pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { vqsubq_s64(a, vqdmull_high_s32(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] #[doc = "## Safety"] @@ -30753,7 +29373,6 @@ pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int6 let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] #[doc = "## Safety"] @@ -30772,7 +29391,6 @@ pub unsafe fn vqdmlsl_laneq_s16( static_assert_uimm_bits!(N, 3); vqsubq_s32(a, vqdmull_laneq_s16::(b, c)) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] #[doc = "## Safety"] @@ -30795,7 +29413,6 @@ pub unsafe fn vqdmlsl_laneq_s16( let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_laneq_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] #[doc = "## Safety"] @@ -30814,7 +29431,6 @@ pub unsafe fn vqdmlsl_laneq_s32( static_assert_uimm_bits!(N, 2); vqsubq_s64(a, vqdmull_laneq_s32::(b, c)) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] #[doc = "## Safety"] @@ -30837,7 +29453,6 @@ pub unsafe fn vqdmlsl_laneq_s32( let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_laneq_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] #[doc = "## Safety"] @@ -30852,7 +29467,6 @@ pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) - static_assert_uimm_bits!(LANE, 2); vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] #[doc = "## Safety"] @@ -30868,7 +29482,6 @@ pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] #[doc = "## Safety"] @@ -30883,7 +29496,6 @@ pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) static_assert_uimm_bits!(LANE, 3); vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] #[doc = "## Safety"] @@ -30899,7 +29511,6 @@ pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] #[doc = "## Safety"] @@ -30914,7 +29525,6 @@ pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) - static_assert_uimm_bits!(LANE, 1); vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] #[doc = "## Safety"] @@ -30930,7 +29540,6 @@ pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] #[doc = "## Safety"] @@ -30945,7 +29554,6 @@ pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) static_assert_uimm_bits!(LANE, 2); vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] #[doc = "## Safety"] @@ -30961,7 +29569,6 @@ pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"] #[doc = "## Safety"] @@ -30974,7 +29581,6 @@ pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); vqsubs_s32(a, simd_extract!(x, 0)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"] #[doc = "## Safety"] @@ -30987,7 +29593,6 @@ pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c)); x as i64 } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] #[doc = "## Safety"] @@ -31002,7 +29607,6 @@ pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> i static_assert_uimm_bits!(LANE, 2); vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] #[doc = "## Safety"] @@ -31020,7 +29624,6 @@ pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> i let ret_val: int16x4_t = vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] #[doc = "## Safety"] @@ -31035,7 +29638,6 @@ pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> static_assert_uimm_bits!(LANE, 2); vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] #[doc = "## Safety"] @@ -31053,7 +29655,6 @@ pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> let ret_val: int16x8_t = vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] #[doc = "## Safety"] @@ -31068,7 +29669,6 @@ pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> i static_assert_uimm_bits!(LANE, 1); vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] #[doc = "## Safety"] @@ -31086,7 +29686,6 @@ pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> i let ret_val: int32x2_t = vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] #[doc = "## Safety"] @@ -31101,7 +29700,6 @@ pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> static_assert_uimm_bits!(LANE, 1); vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] #[doc = "## Safety"] @@ -31119,7 +29717,6 @@ pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> let ret_val: int32x4_t = vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] #[doc = "## Safety"] @@ -31135,7 +29732,6 @@ pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { let b: i16 = simd_extract!(b, N as u32); vqdmulhh_s16(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] #[doc = "## Safety"] @@ -31152,7 +29748,6 @@ pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { let b: i16 = simd_extract!(b, N as u32); vqdmulhh_s16(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] #[doc = "## Safety"] @@ -31168,7 +29763,6 @@ pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { let b: i16 = simd_extract!(b, N as u32); vqdmulhh_s16(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] #[doc = "## Safety"] @@ -31185,7 +29779,6 @@ pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { let b: i16 = simd_extract!(b, N as u32); vqdmulhh_s16(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"] #[doc = "## Safety"] @@ -31199,7 +29792,6 @@ pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 { let b: int16x4_t = vdup_n_s16(b); simd_extract!(vqdmulh_s16(a, b), 0) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"] #[doc = "## Safety"] @@ -31213,7 +29805,6 @@ pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { let b: int32x2_t = vdup_n_s32(b); simd_extract!(vqdmulh_s32(a, b), 0) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] #[doc = "## Safety"] @@ -31229,7 +29820,6 @@ pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { let b: i32 = simd_extract!(b, N as u32); vqdmulhs_s32(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] #[doc = "## Safety"] @@ -31246,7 +29836,6 @@ pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { let b: i32 = simd_extract!(b, N as u32); vqdmulhs_s32(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] #[doc = "## Safety"] @@ -31262,7 +29851,6 @@ pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { let b: i32 = simd_extract!(b, N as u32); vqdmulhs_s32(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] #[doc = "## Safety"] @@ -31279,7 +29867,6 @@ pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { let b: i32 = simd_extract!(b, N as u32); vqdmulhs_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] #[doc = "## Safety"] @@ -31296,7 +29883,6 @@ pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] #[doc = "## Safety"] @@ -31316,7 +29902,6 @@ pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> let ret_val: int32x4_t = vqdmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] #[doc = "## Safety"] @@ -31333,7 +29918,6 @@ pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] #[doc = "## Safety"] @@ -31353,7 +29937,6 @@ pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) - let ret_val: int64x2_t = vqdmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] #[doc = "## Safety"] @@ -31370,7 +29953,6 @@ pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] #[doc = "## Safety"] @@ -31390,7 +29972,6 @@ pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> let ret_val: int64x2_t = vqdmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] #[doc = "## Safety"] @@ -31407,7 +29988,6 @@ pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] #[doc = "## Safety"] @@ -31427,7 +30007,6 @@ pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) - let ret_val: int32x4_t = vqdmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] #[doc = "## Safety"] @@ -31442,7 +30021,6 @@ pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { let b: int16x4_t = vdup_n_s16(b); vqdmull_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] #[doc = "## Safety"] @@ -31459,7 +30037,6 @@ pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { let ret_val: int32x4_t = vqdmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] #[doc = "## Safety"] @@ -31474,7 +30051,6 @@ pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { let b: int32x2_t = vdup_n_s32(b); vqdmull_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] #[doc = "## Safety"] @@ -31491,7 +30067,6 @@ pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { let ret_val: int64x2_t = vqdmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] #[doc = "## Safety"] @@ -31506,7 +30081,6 @@ pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); vqdmull_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] #[doc = "## Safety"] @@ -31524,7 +30098,6 @@ pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = vqdmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] #[doc = "## Safety"] @@ -31539,7 +30112,6 @@ pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); vqdmull_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] #[doc = "## Safety"] @@ -31557,7 +30129,6 @@ pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = vqdmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] #[doc = "## Safety"] @@ -31573,7 +30144,6 @@ pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] #[doc = "## Safety"] @@ -31592,7 +30162,6 @@ pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int let ret_val: int32x4_t = vqdmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] #[doc = "## Safety"] @@ -31608,7 +30177,6 @@ pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] #[doc = "## Safety"] @@ -31627,7 +30195,6 @@ pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int let ret_val: int64x2_t = vqdmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] #[doc = "## Safety"] @@ -31643,7 +30210,6 @@ pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { let b: i16 = simd_extract!(b, N as u32); vqdmullh_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] #[doc = "## Safety"] @@ -31660,7 +30226,6 @@ pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { let b: i16 = simd_extract!(b, N as u32); vqdmullh_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] #[doc = "## Safety"] @@ -31676,7 +30241,6 @@ pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { let b: i32 = simd_extract!(b, N as u32); vqdmulls_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] #[doc = "## Safety"] @@ -31693,7 +30257,6 @@ pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { let b: i32 = simd_extract!(b, N as u32); vqdmulls_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] #[doc = "## Safety"] @@ -31709,7 +30272,6 @@ pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { let b: i16 = simd_extract!(b, N as u32); vqdmullh_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] #[doc = "## Safety"] @@ -31726,7 +30288,6 @@ pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { let b: i16 = simd_extract!(b, N as u32); vqdmullh_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"] #[doc = "## Safety"] @@ -31740,7 +30301,6 @@ pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 { let b: int16x4_t = vdup_n_s16(b); simd_extract!(vqdmull_s16(a, b), 0) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] #[doc = "## Safety"] @@ -31756,7 +30316,6 @@ pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { let b: i32 = simd_extract!(b, N as u32); vqdmulls_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] #[doc = "## Safety"] @@ -31773,7 +30332,6 @@ pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { let b: i32 = simd_extract!(b, N as u32); vqdmulls_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"] #[doc = "## Safety"] @@ -31783,7 +30341,7 @@ pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { #[cfg_attr(test, assert_instr(sqdmull))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqdmulls.scalar" @@ -31792,7 +30350,6 @@ pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { } _vqdmulls_s32(a, b) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] #[doc = "## Safety"] @@ -31809,7 +30366,6 @@ pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] #[doc = "## Safety"] @@ -31833,7 +30389,6 @@ pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] #[doc = "## Safety"] @@ -31846,7 +30401,6 @@ pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] #[doc = "## Safety"] @@ -31862,7 +30416,6 @@ pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] #[doc = "## Safety"] @@ -31875,7 +30428,6 @@ pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] #[doc = "## Safety"] @@ -31891,7 +30443,6 @@ pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] #[doc = "## Safety"] @@ -31908,7 +30459,6 @@ pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] #[doc = "## Safety"] @@ -31932,7 +30482,6 @@ pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] #[doc = "## Safety"] @@ -31945,7 +30494,6 @@ pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] #[doc = "## Safety"] @@ -31961,7 +30509,6 @@ pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] #[doc = "## Safety"] @@ -31974,7 +30521,6 @@ pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] #[doc = "## Safety"] @@ -31990,7 +30536,6 @@ pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"] #[doc = "## Safety"] @@ -32000,7 +30545,7 @@ pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnd_s64(a: i64) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64" @@ -32009,7 +30554,6 @@ pub unsafe fn vqmovnd_s64(a: i64) -> i32 { } _vqmovnd_s64(a) } - #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"] #[doc = "## Safety"] @@ -32019,7 +30563,7 @@ pub unsafe fn vqmovnd_s64(a: i64) -> i32 { #[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqmovnd_u64(a: u64) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64" @@ -32028,7 +30572,6 @@ pub unsafe fn vqmovnd_u64(a: u64) -> u32 { } _vqmovnd_u64(a.as_signed()).as_unsigned() } - #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"] #[doc = "## Safety"] @@ -32040,7 +30583,6 @@ pub unsafe fn vqmovnd_u64(a: u64) -> u32 { pub unsafe fn vqmovnh_s16(a: i16) -> i8 { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) } - #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"] #[doc = "## Safety"] @@ -32052,7 +30594,6 @@ pub unsafe fn vqmovnh_s16(a: i16) -> i8 { pub unsafe fn vqmovns_s32(a: i32) -> i16 { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) } - #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"] #[doc = "## Safety"] @@ -32064,7 +30605,6 @@ pub unsafe fn vqmovns_s32(a: i32) -> i16 { pub unsafe fn vqmovnh_u16(a: u16) -> u8 { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) } - #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"] #[doc = "## Safety"] @@ -32076,7 +30616,6 @@ pub unsafe fn vqmovnh_u16(a: u16) -> u8 { pub unsafe fn vqmovns_u32(a: u32) -> u16 { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] #[doc = "## Safety"] @@ -32093,7 +30632,6 @@ pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] #[doc = "## Safety"] @@ -32117,7 +30655,6 @@ pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] #[doc = "## Safety"] @@ -32130,7 +30667,6 @@ pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] #[doc = "## Safety"] @@ -32146,7 +30682,6 @@ pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] #[doc = "## Safety"] @@ -32159,7 +30694,6 @@ pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] #[doc = "## Safety"] @@ -32175,7 +30709,6 @@ pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"] #[doc = "## Safety"] @@ -32187,7 +30720,6 @@ pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { pub unsafe fn vqmovunh_s16(a: i16) -> u8 { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"] #[doc = "## Safety"] @@ -32199,7 +30731,6 @@ pub unsafe fn vqmovunh_s16(a: i16) -> u8 { pub unsafe fn vqmovuns_s32(a: i32) -> u16 { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"] #[doc = "## Safety"] @@ -32211,7 +30742,6 @@ pub unsafe fn vqmovuns_s32(a: i32) -> u16 { pub unsafe fn vqmovund_s64(a: i64) -> u32 { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"] #[doc = "## Safety"] @@ -32221,7 +30751,7 @@ pub unsafe fn vqmovund_s64(a: i64) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v1i64" @@ -32230,7 +30760,6 @@ pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { } _vqneg_s64(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] #[doc = "## Safety"] @@ -32241,7 +30770,7 @@ pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v2i64" @@ -32250,7 +30779,6 @@ pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { } _vqnegq_s64(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] #[doc = "## Safety"] @@ -32261,7 +30789,7 @@ pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v2i64" @@ -32272,7 +30800,6 @@ pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vqnegq_s64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"] #[doc = "## Safety"] @@ -32284,7 +30811,6 @@ pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { pub unsafe fn vqnegb_s8(a: i8) -> i8 { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"] #[doc = "## Safety"] @@ -32296,7 +30822,6 @@ pub unsafe fn vqnegb_s8(a: i8) -> i8 { pub unsafe fn vqnegh_s16(a: i16) -> i16 { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"] #[doc = "## Safety"] @@ -32308,7 +30833,6 @@ pub unsafe fn vqnegh_s16(a: i16) -> i16 { pub unsafe fn vqnegs_s32(a: i32) -> i32 { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"] #[doc = "## Safety"] @@ -32320,7 +30844,6 @@ pub unsafe fn vqnegs_s32(a: i32) -> i32 { pub unsafe fn vqnegd_s64(a: i64) -> i64 { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] #[doc = "## Safety"] @@ -32340,7 +30863,6 @@ pub unsafe fn vqrdmlah_lane_s16( let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlah_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] #[doc = "## Safety"] @@ -32364,7 +30886,6 @@ pub unsafe fn vqrdmlah_lane_s16( let ret_val: int16x4_t = vqrdmlah_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] #[doc = "## Safety"] @@ -32384,7 +30905,6 @@ pub unsafe fn vqrdmlah_lane_s32( let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlah_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] #[doc = "## Safety"] @@ -32408,7 +30928,6 @@ pub unsafe fn vqrdmlah_lane_s32( let ret_val: int32x2_t = vqrdmlah_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] #[doc = "## Safety"] @@ -32428,7 +30947,6 @@ pub unsafe fn vqrdmlah_laneq_s16( let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlah_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] #[doc = "## Safety"] @@ -32452,7 +30970,6 @@ pub unsafe fn vqrdmlah_laneq_s16( let ret_val: int16x4_t = vqrdmlah_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] #[doc = "## Safety"] @@ -32472,7 +30989,6 @@ pub unsafe fn vqrdmlah_laneq_s32( let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlah_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] #[doc = "## Safety"] @@ -32496,7 +31012,6 @@ pub unsafe fn vqrdmlah_laneq_s32( let ret_val: int32x2_t = vqrdmlah_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] #[doc = "## Safety"] @@ -32529,7 +31044,6 @@ pub unsafe fn vqrdmlahq_lane_s16( ); vqrdmlahq_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] #[doc = "## Safety"] @@ -32566,7 +31080,6 @@ pub unsafe fn vqrdmlahq_lane_s16( let ret_val: int16x8_t = vqrdmlahq_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] #[doc = "## Safety"] @@ -32586,7 +31099,6 @@ pub unsafe fn vqrdmlahq_lane_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlahq_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] #[doc = "## Safety"] @@ -32610,7 +31122,6 @@ pub unsafe fn vqrdmlahq_lane_s32( let ret_val: int32x4_t = vqrdmlahq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] #[doc = "## Safety"] @@ -32643,7 +31154,6 @@ pub unsafe fn vqrdmlahq_laneq_s16( ); vqrdmlahq_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] #[doc = "## Safety"] @@ -32680,7 +31190,6 @@ pub unsafe fn vqrdmlahq_laneq_s16( let ret_val: int16x8_t = vqrdmlahq_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] #[doc = "## Safety"] @@ -32700,7 +31209,6 @@ pub unsafe fn vqrdmlahq_laneq_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlahq_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] #[doc = "## Safety"] @@ -32724,7 +31232,6 @@ pub unsafe fn vqrdmlahq_laneq_s32( let ret_val: int32x4_t = vqrdmlahq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] #[doc = "## Safety"] @@ -32735,7 +31242,7 @@ pub unsafe fn vqrdmlahq_laneq_s32( #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" @@ -32744,7 +31251,6 @@ pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ } _vqrdmlah_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] #[doc = "## Safety"] @@ -32755,7 +31261,7 @@ pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" @@ -32768,7 +31274,6 @@ pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ let ret_val: int16x4_t = _vqrdmlah_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] #[doc = "## Safety"] @@ -32779,7 +31284,7 @@ pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" @@ -32788,7 +31293,6 @@ pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 } _vqrdmlahq_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] #[doc = "## Safety"] @@ -32799,7 +31303,7 @@ pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" @@ -32812,7 +31316,6 @@ pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 let ret_val: int16x8_t = _vqrdmlahq_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] #[doc = "## Safety"] @@ -32823,7 +31326,7 @@ pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" @@ -32832,7 +31335,6 @@ pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ } _vqrdmlah_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] #[doc = "## Safety"] @@ -32843,7 +31345,7 @@ pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" @@ -32856,7 +31358,6 @@ pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ let ret_val: int32x2_t = _vqrdmlah_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] #[doc = "## Safety"] @@ -32867,7 +31368,7 @@ pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" @@ -32876,7 +31377,6 @@ pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4 } _vqrdmlahq_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] #[doc = "## Safety"] @@ -32887,7 +31387,7 @@ pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4 #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" @@ -32900,7 +31400,6 @@ pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4 let ret_val: int32x4_t = _vqrdmlahq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] #[doc = "## Safety"] @@ -32915,7 +31414,6 @@ pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) static_assert_uimm_bits!(LANE, 2); vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] #[doc = "## Safety"] @@ -32931,7 +31429,6 @@ pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] #[doc = "## Safety"] @@ -32946,7 +31443,6 @@ pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) static_assert_uimm_bits!(LANE, 3); vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] #[doc = "## Safety"] @@ -32962,7 +31458,6 @@ pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] #[doc = "## Safety"] @@ -32977,7 +31472,6 @@ pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) static_assert_uimm_bits!(LANE, 1); vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] #[doc = "## Safety"] @@ -32993,7 +31487,6 @@ pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] #[doc = "## Safety"] @@ -33008,7 +31501,6 @@ pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) static_assert_uimm_bits!(LANE, 2); vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] #[doc = "## Safety"] @@ -33024,7 +31516,6 @@ pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"] #[doc = "## Safety"] @@ -33039,7 +31530,6 @@ pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 { let c: int16x4_t = vdup_n_s16(c); simd_extract!(vqrdmlah_s16(a, b, c), 0) } - #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"] #[doc = "## Safety"] @@ -33054,7 +31544,6 @@ pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 { let c: int32x2_t = vdup_n_s32(c); simd_extract!(vqrdmlah_s32(a, b, c), 0) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] #[doc = "## Safety"] @@ -33074,7 +31563,6 @@ pub unsafe fn vqrdmlsh_lane_s16( let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlsh_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] #[doc = "## Safety"] @@ -33098,7 +31586,6 @@ pub unsafe fn vqrdmlsh_lane_s16( let ret_val: int16x4_t = vqrdmlsh_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] #[doc = "## Safety"] @@ -33118,7 +31605,6 @@ pub unsafe fn vqrdmlsh_lane_s32( let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlsh_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] #[doc = "## Safety"] @@ -33142,7 +31628,6 @@ pub unsafe fn vqrdmlsh_lane_s32( let ret_val: int32x2_t = vqrdmlsh_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] #[doc = "## Safety"] @@ -33162,7 +31647,6 @@ pub unsafe fn vqrdmlsh_laneq_s16( let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlsh_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] #[doc = "## Safety"] @@ -33186,7 +31670,6 @@ pub unsafe fn vqrdmlsh_laneq_s16( let ret_val: int16x4_t = vqrdmlsh_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] #[doc = "## Safety"] @@ -33206,7 +31689,6 @@ pub unsafe fn vqrdmlsh_laneq_s32( let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlsh_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] #[doc = "## Safety"] @@ -33230,7 +31712,6 @@ pub unsafe fn vqrdmlsh_laneq_s32( let ret_val: int32x2_t = vqrdmlsh_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] #[doc = "## Safety"] @@ -33263,7 +31744,6 @@ pub unsafe fn vqrdmlshq_lane_s16( ); vqrdmlshq_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] #[doc = "## Safety"] @@ -33300,7 +31780,6 @@ pub unsafe fn vqrdmlshq_lane_s16( let ret_val: int16x8_t = vqrdmlshq_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] #[doc = "## Safety"] @@ -33320,7 +31799,6 @@ pub unsafe fn vqrdmlshq_lane_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlshq_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] #[doc = "## Safety"] @@ -33344,7 +31822,6 @@ pub unsafe fn vqrdmlshq_lane_s32( let ret_val: int32x4_t = vqrdmlshq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] #[doc = "## Safety"] @@ -33377,7 +31854,6 @@ pub unsafe fn vqrdmlshq_laneq_s16( ); vqrdmlshq_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] #[doc = "## Safety"] @@ -33414,7 +31890,6 @@ pub unsafe fn vqrdmlshq_laneq_s16( let ret_val: int16x8_t = vqrdmlshq_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] #[doc = "## Safety"] @@ -33434,7 +31909,6 @@ pub unsafe fn vqrdmlshq_laneq_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlshq_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] #[doc = "## Safety"] @@ -33458,7 +31932,6 @@ pub unsafe fn vqrdmlshq_laneq_s32( let ret_val: int32x4_t = vqrdmlshq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] #[doc = "## Safety"] @@ -33469,7 +31942,7 @@ pub unsafe fn vqrdmlshq_laneq_s32( #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" @@ -33478,7 +31951,6 @@ pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ } _vqrdmlsh_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] #[doc = "## Safety"] @@ -33489,7 +31961,7 @@ pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" @@ -33502,7 +31974,6 @@ pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ let ret_val: int16x4_t = _vqrdmlsh_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] #[doc = "## Safety"] @@ -33513,7 +31984,7 @@ pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_ #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" @@ -33522,7 +31993,6 @@ pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 } _vqrdmlshq_s16(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] #[doc = "## Safety"] @@ -33533,7 +32003,7 @@ pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" @@ -33546,7 +32016,6 @@ pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 let ret_val: int16x8_t = _vqrdmlshq_s16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] #[doc = "## Safety"] @@ -33557,7 +32026,7 @@ pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8 #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" @@ -33566,7 +32035,6 @@ pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ } _vqrdmlsh_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] #[doc = "## Safety"] @@ -33577,7 +32045,7 @@ pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" @@ -33590,7 +32058,6 @@ pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ let ret_val: int32x2_t = _vqrdmlsh_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] #[doc = "## Safety"] @@ -33601,7 +32068,7 @@ pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_ #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" @@ -33610,7 +32077,6 @@ pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4 } _vqrdmlshq_s32(a, b, c) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] #[doc = "## Safety"] @@ -33621,7 +32087,7 @@ pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4 #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" @@ -33634,7 +32100,6 @@ pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4 let ret_val: int32x4_t = _vqrdmlshq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] #[doc = "## Safety"] @@ -33649,7 +32114,6 @@ pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) static_assert_uimm_bits!(LANE, 2); vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] #[doc = "## Safety"] @@ -33665,7 +32129,6 @@ pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] #[doc = "## Safety"] @@ -33680,7 +32143,6 @@ pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) static_assert_uimm_bits!(LANE, 3); vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] #[doc = "## Safety"] @@ -33696,7 +32158,6 @@ pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] #[doc = "## Safety"] @@ -33711,7 +32172,6 @@ pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) static_assert_uimm_bits!(LANE, 1); vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] #[doc = "## Safety"] @@ -33727,7 +32187,6 @@ pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] #[doc = "## Safety"] @@ -33742,7 +32201,6 @@ pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) static_assert_uimm_bits!(LANE, 2); vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] #[doc = "## Safety"] @@ -33758,7 +32216,6 @@ pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"] #[doc = "## Safety"] @@ -33773,7 +32230,6 @@ pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { let c: int16x4_t = vdup_n_s16(c); simd_extract!(vqrdmlsh_s16(a, b, c), 0) } - #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"] #[doc = "## Safety"] @@ -33788,7 +32244,6 @@ pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { let c: int32x2_t = vdup_n_s32(c); simd_extract!(vqrdmlsh_s32(a, b, c), 0) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] #[doc = "## Safety"] @@ -33803,7 +32258,6 @@ pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { static_assert_uimm_bits!(LANE, 2); vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] #[doc = "## Safety"] @@ -33819,7 +32273,6 @@ pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] #[doc = "## Safety"] @@ -33834,7 +32287,6 @@ pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 static_assert_uimm_bits!(LANE, 3); vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] #[doc = "## Safety"] @@ -33850,7 +32302,6 @@ pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] #[doc = "## Safety"] @@ -33865,7 +32316,6 @@ pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { static_assert_uimm_bits!(LANE, 1); vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] #[doc = "## Safety"] @@ -33881,7 +32331,6 @@ pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] #[doc = "## Safety"] @@ -33896,7 +32345,6 @@ pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 static_assert_uimm_bits!(LANE, 2); vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] #[doc = "## Safety"] @@ -33912,7 +32360,6 @@ pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"] #[doc = "## Safety"] @@ -33924,7 +32371,6 @@ pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"] #[doc = "## Safety"] @@ -33936,7 +32382,6 @@ pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"] #[doc = "## Safety"] @@ -33950,7 +32395,6 @@ pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 { let b: int8x8_t = vdup_n_s8(b); simd_extract!(vqrshl_s8(a, b), 0) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"] #[doc = "## Safety"] @@ -33964,7 +32408,6 @@ pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 { let b: int16x4_t = vdup_n_s16(b); simd_extract!(vqrshl_s16(a, b), 0) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"] #[doc = "## Safety"] @@ -33978,7 +32421,6 @@ pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 { let b: int8x8_t = vdup_n_s8(b); simd_extract!(vqrshl_u8(a, b), 0) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"] #[doc = "## Safety"] @@ -33992,7 +32434,6 @@ pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { let b: int16x4_t = vdup_n_s16(b); simd_extract!(vqrshl_u16(a, b), 0) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"] #[doc = "## Safety"] @@ -34002,7 +32443,7 @@ pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshl.i64" @@ -34011,7 +32452,6 @@ pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { } _vqrshld_s64(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"] #[doc = "## Safety"] @@ -34021,7 +32461,7 @@ pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshl.i32" @@ -34030,7 +32470,6 @@ pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { } _vqrshls_s32(a, b) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"] #[doc = "## Safety"] @@ -34040,7 +32479,7 @@ pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i32" @@ -34049,7 +32488,6 @@ pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { } _vqrshls_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"] #[doc = "## Safety"] @@ -34059,7 +32497,7 @@ pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i64" @@ -34068,7 +32506,6 @@ pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { } _vqrshld_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] #[doc = "## Safety"] @@ -34087,7 +32524,6 @@ pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] #[doc = "## Safety"] @@ -34113,7 +32549,6 @@ pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] #[doc = "## Safety"] @@ -34128,7 +32563,6 @@ pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> in static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] #[doc = "## Safety"] @@ -34146,7 +32580,6 @@ pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> in let ret_val: int16x8_t = simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] #[doc = "## Safety"] @@ -34161,7 +32594,6 @@ pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> in static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] #[doc = "## Safety"] @@ -34179,7 +32611,6 @@ pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> in let ret_val: int32x4_t = simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] #[doc = "## Safety"] @@ -34198,7 +32629,6 @@ pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> u [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] #[doc = "## Safety"] @@ -34224,7 +32654,6 @@ pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> u [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] #[doc = "## Safety"] @@ -34239,7 +32668,6 @@ pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] #[doc = "## Safety"] @@ -34257,7 +32685,6 @@ pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> let ret_val: uint16x8_t = simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] #[doc = "## Safety"] @@ -34272,7 +32699,6 @@ pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] #[doc = "## Safety"] @@ -34290,7 +32716,6 @@ pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> let ret_val: uint32x4_t = simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"] #[doc = "## Safety"] @@ -34305,7 +32730,6 @@ pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { let a: uint64x2_t = vdupq_n_u64(a); simd_extract!(vqrshrn_n_u64::(a), 0) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"] #[doc = "## Safety"] @@ -34320,7 +32744,6 @@ pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { let a: uint16x8_t = vdupq_n_u16(a); simd_extract!(vqrshrn_n_u16::(a), 0) } - #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"] #[doc = "## Safety"] @@ -34335,7 +32758,6 @@ pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { let a: uint32x4_t = vdupq_n_u32(a); simd_extract!(vqrshrn_n_u32::(a), 0) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"] #[doc = "## Safety"] @@ -34350,7 +32772,6 @@ pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { let a: int16x8_t = vdupq_n_s16(a); simd_extract!(vqrshrn_n_s16::(a), 0) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"] #[doc = "## Safety"] @@ -34365,7 +32786,6 @@ pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { let a: int32x4_t = vdupq_n_s32(a); simd_extract!(vqrshrn_n_s32::(a), 0) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"] #[doc = "## Safety"] @@ -34380,7 +32800,6 @@ pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { let a: int64x2_t = vdupq_n_s64(a); simd_extract!(vqrshrn_n_s64::(a), 0) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] #[doc = "## Safety"] @@ -34399,7 +32818,6 @@ pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> u [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] #[doc = "## Safety"] @@ -34425,7 +32843,6 @@ pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> u [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] #[doc = "## Safety"] @@ -34440,7 +32857,6 @@ pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] #[doc = "## Safety"] @@ -34458,7 +32874,6 @@ pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> let ret_val: uint16x8_t = simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] #[doc = "## Safety"] @@ -34473,7 +32888,6 @@ pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] #[doc = "## Safety"] @@ -34491,7 +32905,6 @@ pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> let ret_val: uint32x4_t = simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"] #[doc = "## Safety"] @@ -34506,7 +32919,6 @@ pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { let a: int64x2_t = vdupq_n_s64(a); simd_extract!(vqrshrun_n_s64::(a), 0) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"] #[doc = "## Safety"] @@ -34521,7 +32933,6 @@ pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { let a: int16x8_t = vdupq_n_s16(a); simd_extract!(vqrshrun_n_s16::(a), 0) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"] #[doc = "## Safety"] @@ -34536,7 +32947,6 @@ pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { let a: int32x4_t = vdupq_n_s32(a); simd_extract!(vqrshrun_n_s32::(a), 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"] #[doc = "## Safety"] @@ -34550,7 +32960,6 @@ pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { static_assert_uimm_bits!(N, 3); simd_extract!(vqshl_n_s8::(vdup_n_s8(a)), 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"] #[doc = "## Safety"] @@ -34564,7 +32973,6 @@ pub unsafe fn vqshld_n_s64(a: i64) -> i64 { static_assert_uimm_bits!(N, 6); simd_extract!(vqshl_n_s64::(vdup_n_s64(a)), 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"] #[doc = "## Safety"] @@ -34578,7 +32986,6 @@ pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { static_assert_uimm_bits!(N, 4); simd_extract!(vqshl_n_s16::(vdup_n_s16(a)), 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"] #[doc = "## Safety"] @@ -34592,7 +32999,6 @@ pub unsafe fn vqshls_n_s32(a: i32) -> i32 { static_assert_uimm_bits!(N, 5); simd_extract!(vqshl_n_s32::(vdup_n_s32(a)), 0) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"] #[doc = "## Safety"] @@ -34606,7 +33012,6 @@ pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { static_assert_uimm_bits!(N, 3); simd_extract!(vqshl_n_u8::(vdup_n_u8(a)), 0) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"] #[doc = "## Safety"] @@ -34620,7 +33025,6 @@ pub unsafe fn vqshld_n_u64(a: u64) -> u64 { static_assert_uimm_bits!(N, 6); simd_extract!(vqshl_n_u64::(vdup_n_u64(a)), 0) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"] #[doc = "## Safety"] @@ -34634,7 +33038,6 @@ pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { static_assert_uimm_bits!(N, 4); simd_extract!(vqshl_n_u16::(vdup_n_u16(a)), 0) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"] #[doc = "## Safety"] @@ -34648,7 +33051,6 @@ pub unsafe fn vqshls_n_u32(a: u32) -> u32 { static_assert_uimm_bits!(N, 5); simd_extract!(vqshl_n_u32::(vdup_n_u32(a)), 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"] #[doc = "## Safety"] @@ -34661,7 +33063,6 @@ pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 { let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b)); simd_extract!(c, 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"] #[doc = "## Safety"] @@ -34674,7 +33075,6 @@ pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 { let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b)); simd_extract!(c, 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"] #[doc = "## Safety"] @@ -34687,7 +33087,6 @@ pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 { let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b)); simd_extract!(c, 0) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"] #[doc = "## Safety"] @@ -34700,7 +33099,6 @@ pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 { let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b)); simd_extract!(c, 0) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"] #[doc = "## Safety"] @@ -34713,7 +33111,6 @@ pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 { let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b)); simd_extract!(c, 0) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"] #[doc = "## Safety"] @@ -34726,7 +33123,6 @@ pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b)); simd_extract!(c, 0) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"] #[doc = "## Safety"] @@ -34736,7 +33132,7 @@ pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { #[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshl.i64" @@ -34745,7 +33141,6 @@ pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { } _vqshld_s64(a, b) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"] #[doc = "## Safety"] @@ -34755,7 +33150,7 @@ pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { #[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.i64" @@ -34764,7 +33159,6 @@ pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { } _vqshld_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"] #[doc = "## Safety"] @@ -34778,7 +33172,6 @@ pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { static_assert_uimm_bits!(N, 3); simd_extract!(vqshlu_n_s8::(vdup_n_s8(a)), 0) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"] #[doc = "## Safety"] @@ -34792,7 +33185,6 @@ pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { static_assert_uimm_bits!(N, 6); simd_extract!(vqshlu_n_s64::(vdup_n_s64(a)), 0) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"] #[doc = "## Safety"] @@ -34806,7 +33198,6 @@ pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { static_assert_uimm_bits!(N, 4); simd_extract!(vqshlu_n_s16::(vdup_n_s16(a)), 0) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"] #[doc = "## Safety"] @@ -34820,7 +33211,6 @@ pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { static_assert_uimm_bits!(N, 5); simd_extract!(vqshlu_n_s32::(vdup_n_s32(a)), 0) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] #[doc = "## Safety"] @@ -34839,7 +33229,6 @@ pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] #[doc = "## Safety"] @@ -34865,7 +33254,6 @@ pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] #[doc = "## Safety"] @@ -34880,7 +33268,6 @@ pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] #[doc = "## Safety"] @@ -34898,7 +33285,6 @@ pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int let ret_val: int16x8_t = simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] #[doc = "## Safety"] @@ -34913,7 +33299,6 @@ pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] #[doc = "## Safety"] @@ -34931,7 +33316,6 @@ pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int let ret_val: int32x4_t = simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] #[doc = "## Safety"] @@ -34950,7 +33334,6 @@ pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] #[doc = "## Safety"] @@ -34976,7 +33359,6 @@ pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] #[doc = "## Safety"] @@ -34991,7 +33373,6 @@ pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] #[doc = "## Safety"] @@ -35009,7 +33390,6 @@ pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u let ret_val: uint16x8_t = simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] #[doc = "## Safety"] @@ -35024,7 +33404,6 @@ pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] #[doc = "## Safety"] @@ -35042,7 +33421,6 @@ pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u let ret_val: uint32x4_t = simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"] #[doc = "## Safety"] @@ -35054,7 +33432,7 @@ pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.i32" @@ -35063,7 +33441,6 @@ pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { } _vqshrnd_n_s64(a, N) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"] #[doc = "## Safety"] @@ -35075,7 +33452,7 @@ pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.i32" @@ -35084,7 +33461,6 @@ pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { } _vqshrnd_n_u64(a.as_signed(), N).as_unsigned() } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"] #[doc = "## Safety"] @@ -35098,7 +33474,6 @@ pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { static_assert!(N >= 1 && N <= 8); simd_extract!(vqshrn_n_s16::(vdupq_n_s16(a)), 0) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"] #[doc = "## Safety"] @@ -35112,7 +33487,6 @@ pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { static_assert!(N >= 1 && N <= 16); simd_extract!(vqshrn_n_s32::(vdupq_n_s32(a)), 0) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"] #[doc = "## Safety"] @@ -35126,7 +33500,6 @@ pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { static_assert!(N >= 1 && N <= 8); simd_extract!(vqshrn_n_u16::(vdupq_n_u16(a)), 0) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"] #[doc = "## Safety"] @@ -35140,7 +33513,6 @@ pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { static_assert!(N >= 1 && N <= 16); simd_extract!(vqshrn_n_u32::(vdupq_n_u32(a)), 0) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] #[doc = "## Safety"] @@ -35159,7 +33531,6 @@ pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] #[doc = "## Safety"] @@ -35185,7 +33556,6 @@ pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] #[doc = "## Safety"] @@ -35200,7 +33570,6 @@ pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> u static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] #[doc = "## Safety"] @@ -35218,7 +33587,6 @@ pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> u let ret_val: uint16x8_t = simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] #[doc = "## Safety"] @@ -35233,7 +33601,6 @@ pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> u static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] #[doc = "## Safety"] @@ -35251,7 +33618,6 @@ pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> u let ret_val: uint32x4_t = simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"] #[doc = "## Safety"] @@ -35265,7 +33631,6 @@ pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { static_assert!(N >= 1 && N <= 32); simd_extract!(vqshrun_n_s64::(vdupq_n_s64(a)), 0) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"] #[doc = "## Safety"] @@ -35279,7 +33644,6 @@ pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { static_assert!(N >= 1 && N <= 8); simd_extract!(vqshrun_n_s16::(vdupq_n_s16(a)), 0) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"] #[doc = "## Safety"] @@ -35293,7 +33657,6 @@ pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { static_assert!(N >= 1 && N <= 16); simd_extract!(vqshrun_n_s32::(vdupq_n_s32(a)), 0) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"] #[doc = "## Safety"] @@ -35307,7 +33670,6 @@ pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 { let b: int8x8_t = vdup_n_s8(b); simd_extract!(vqsub_s8(a, b), 0) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"] #[doc = "## Safety"] @@ -35321,7 +33683,6 @@ pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 { let b: int16x4_t = vdup_n_s16(b); simd_extract!(vqsub_s16(a, b), 0) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"] #[doc = "## Safety"] @@ -35335,7 +33696,6 @@ pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 { let b: uint8x8_t = vdup_n_u8(b); simd_extract!(vqsub_u8(a, b), 0) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"] #[doc = "## Safety"] @@ -35349,7 +33709,6 @@ pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { let b: uint16x4_t = vdup_n_u16(b); simd_extract!(vqsub_u16(a, b), 0) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"] #[doc = "## Safety"] @@ -35359,7 +33718,7 @@ pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.i32" @@ -35368,7 +33727,6 @@ pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { } _vqsubs_s32(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"] #[doc = "## Safety"] @@ -35378,7 +33736,7 @@ pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.i64" @@ -35387,7 +33745,6 @@ pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { } _vqsubd_s64(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"] #[doc = "## Safety"] @@ -35397,7 +33754,7 @@ pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i32" @@ -35406,7 +33763,6 @@ pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { } _vqsubs_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"] #[doc = "## Safety"] @@ -35416,7 +33772,7 @@ pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i64" @@ -35425,7 +33781,6 @@ pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { } _vqsubd_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] #[doc = "## Safety"] @@ -35436,7 +33791,7 @@ pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl1.v8i8" @@ -35445,7 +33800,6 @@ unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { } _vqtbl1(a, b.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] #[doc = "## Safety"] @@ -35456,7 +33810,7 @@ unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl1.v8i8" @@ -35468,7 +33822,6 @@ unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqtbl1(a, b.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] #[doc = "## Safety"] @@ -35479,7 +33832,7 @@ unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl1.v16i8" @@ -35488,7 +33841,6 @@ unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { } _vqtbl1q(a, b.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] #[doc = "## Safety"] @@ -35499,7 +33851,7 @@ unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl1.v16i8" @@ -35515,7 +33867,6 @@ unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] #[doc = "## Safety"] @@ -35528,7 +33879,6 @@ unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { vqtbl1(a, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] #[doc = "## Safety"] @@ -35544,7 +33894,6 @@ pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbl1(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] #[doc = "## Safety"] @@ -35557,7 +33906,6 @@ pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { vqtbl1q(a, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] #[doc = "## Safety"] @@ -35577,7 +33925,6 @@ pub unsafe fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] #[doc = "## Safety"] @@ -35591,7 +33938,6 @@ pub unsafe fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { let x = transmute(vqtbl1(transmute(a), b)); x } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] #[doc = "## Safety"] @@ -35608,7 +33954,6 @@ pub unsafe fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] #[doc = "## Safety"] @@ -35622,7 +33967,6 @@ pub unsafe fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let x = transmute(vqtbl1q(transmute(a), b)); x } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] #[doc = "## Safety"] @@ -35643,7 +33987,6 @@ pub unsafe fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] #[doc = "## Safety"] @@ -35657,7 +34000,6 @@ pub unsafe fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { let x = transmute(vqtbl1(transmute(a), b)); x } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] #[doc = "## Safety"] @@ -35674,7 +34016,6 @@ pub unsafe fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] #[doc = "## Safety"] @@ -35688,7 +34029,6 @@ pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { let x = transmute(vqtbl1q(transmute(a), b)); x } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] #[doc = "## Safety"] @@ -35709,7 +34049,6 @@ pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] #[doc = "## Safety"] @@ -35720,7 +34059,7 @@ pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl2.v8i8" @@ -35729,7 +34068,6 @@ unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { } _vqtbl2(a, b, c.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] #[doc = "## Safety"] @@ -35740,7 +34078,7 @@ unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl2.v8i8" @@ -35753,7 +34091,6 @@ unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqtbl2(a, b, c.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] #[doc = "## Safety"] @@ -35764,7 +34101,7 @@ unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl2.v16i8" @@ -35773,7 +34110,6 @@ unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { } _vqtbl2q(a, b, c.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] #[doc = "## Safety"] @@ -35784,7 +34120,7 @@ unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl2.v16i8" @@ -35801,7 +34137,6 @@ unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] #[doc = "## Safety"] @@ -35814,7 +34149,6 @@ unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { vqtbl2(a.0, a.1, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] #[doc = "## Safety"] @@ -35840,7 +34174,6 @@ pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbl2(a.0, a.1, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] #[doc = "## Safety"] @@ -35853,7 +34186,6 @@ pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { vqtbl2q(a.0, a.1, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] #[doc = "## Safety"] @@ -35883,7 +34215,6 @@ pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] #[doc = "## Safety"] @@ -35896,7 +34227,6 @@ pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] #[doc = "## Safety"] @@ -35922,7 +34252,6 @@ pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] #[doc = "## Safety"] @@ -35935,7 +34264,6 @@ pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] #[doc = "## Safety"] @@ -35965,7 +34293,6 @@ pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] #[doc = "## Safety"] @@ -35978,7 +34305,6 @@ pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] #[doc = "## Safety"] @@ -36004,7 +34330,6 @@ pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] #[doc = "## Safety"] @@ -36017,7 +34342,6 @@ pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] #[doc = "## Safety"] @@ -36047,7 +34371,6 @@ pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] #[doc = "## Safety"] @@ -36058,7 +34381,7 @@ pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl3.v8i8" @@ -36067,7 +34390,6 @@ unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8 } _vqtbl3(a, b, c, d.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] #[doc = "## Safety"] @@ -36078,7 +34400,7 @@ unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8 #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl3.v8i8" @@ -36092,7 +34414,6 @@ unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8 let ret_val: int8x8_t = _vqtbl3(a, b, c, d.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] #[doc = "## Safety"] @@ -36103,7 +34424,7 @@ unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8 #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl3.v16i8" @@ -36112,7 +34433,6 @@ unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in } _vqtbl3q(a, b, c, d.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] #[doc = "## Safety"] @@ -36123,7 +34443,7 @@ unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl3.v16i8" @@ -36141,7 +34461,6 @@ unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] #[doc = "## Safety"] @@ -36154,7 +34473,6 @@ unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { vqtbl3(a.0, a.1, a.2, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] #[doc = "## Safety"] @@ -36185,7 +34503,6 @@ pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbl3(a.0, a.1, a.2, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] #[doc = "## Safety"] @@ -36198,7 +34515,6 @@ pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { vqtbl3q(a.0, a.1, a.2, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] #[doc = "## Safety"] @@ -36233,7 +34549,6 @@ pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] #[doc = "## Safety"] @@ -36246,7 +34561,6 @@ pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] #[doc = "## Safety"] @@ -36277,7 +34591,6 @@ pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] #[doc = "## Safety"] @@ -36290,7 +34603,6 @@ pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] #[doc = "## Safety"] @@ -36325,7 +34637,6 @@ pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] #[doc = "## Safety"] @@ -36338,7 +34649,6 @@ pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] #[doc = "## Safety"] @@ -36369,7 +34679,6 @@ pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] #[doc = "## Safety"] @@ -36382,7 +34691,6 @@ pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] #[doc = "## Safety"] @@ -36417,7 +34725,6 @@ pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] #[doc = "## Safety"] @@ -36428,7 +34735,7 @@ pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl4.v8i8" @@ -36438,7 +34745,6 @@ unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint } _vqtbl4(a, b, c, d, e.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] #[doc = "## Safety"] @@ -36449,7 +34755,7 @@ unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl4.v8i8" @@ -36465,7 +34771,6 @@ unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint let ret_val: int8x8_t = _vqtbl4(a, b, c, d, e.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] #[doc = "## Safety"] @@ -36482,7 +34787,7 @@ unsafe fn vqtbl4q( d: int8x16_t, e: uint8x16_t, ) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl4.v16i8" @@ -36497,7 +34802,6 @@ unsafe fn vqtbl4q( } _vqtbl4q(a, b, c, d, e.as_signed()) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] #[doc = "## Safety"] @@ -36514,7 +34818,7 @@ unsafe fn vqtbl4q( d: int8x16_t, e: uint8x16_t, ) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl4.v16i8" @@ -36539,7 +34843,6 @@ unsafe fn vqtbl4q( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] #[doc = "## Safety"] @@ -36552,7 +34855,6 @@ unsafe fn vqtbl4q( pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { vqtbl4(a.0, a.1, a.2, a.3, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] #[doc = "## Safety"] @@ -36588,7 +34890,6 @@ pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbl4(a.0, a.1, a.2, a.3, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] #[doc = "## Safety"] @@ -36601,7 +34902,6 @@ pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { vqtbl4q(a.0, a.1, a.2, a.3, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] #[doc = "## Safety"] @@ -36641,7 +34941,6 @@ pub unsafe fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] #[doc = "## Safety"] @@ -36660,7 +34959,6 @@ pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { b, )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] #[doc = "## Safety"] @@ -36702,7 +35000,6 @@ pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] #[doc = "## Safety"] @@ -36721,7 +35018,6 @@ pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { b, )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] #[doc = "## Safety"] @@ -36767,7 +35063,6 @@ pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] #[doc = "## Safety"] @@ -36786,7 +35081,6 @@ pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { b, )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] #[doc = "## Safety"] @@ -36828,7 +35122,6 @@ pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] #[doc = "## Safety"] @@ -36847,7 +35140,6 @@ pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { b, )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] #[doc = "## Safety"] @@ -36893,7 +35185,6 @@ pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] #[doc = "## Safety"] @@ -36904,7 +35195,7 @@ pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx1.v8i8" @@ -36913,7 +35204,6 @@ unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { } _vqtbx1(a, b, c.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] #[doc = "## Safety"] @@ -36924,7 +35214,7 @@ unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx1.v8i8" @@ -36937,7 +35227,6 @@ unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqtbx1(a, b, c.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] #[doc = "## Safety"] @@ -36948,7 +35237,7 @@ unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx1.v16i8" @@ -36957,7 +35246,6 @@ unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { } _vqtbx1q(a, b, c.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] #[doc = "## Safety"] @@ -36968,7 +35256,7 @@ unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx1.v16i8" @@ -36985,7 +35273,6 @@ unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] #[doc = "## Safety"] @@ -36998,7 +35285,6 @@ unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { vqtbx1(a, b, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] #[doc = "## Safety"] @@ -37015,7 +35301,6 @@ pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbx1(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] #[doc = "## Safety"] @@ -37028,7 +35313,6 @@ pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { vqtbx1q(a, b, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] #[doc = "## Safety"] @@ -37049,7 +35333,6 @@ pub unsafe fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] #[doc = "## Safety"] @@ -37063,7 +35346,6 @@ pub unsafe fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t let x = transmute(vqtbx1(transmute(a), transmute(b), c)); x } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] #[doc = "## Safety"] @@ -37081,7 +35363,6 @@ pub unsafe fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t let ret_val: uint8x8_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] #[doc = "## Safety"] @@ -37095,7 +35376,6 @@ pub unsafe fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x1 let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); x } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] #[doc = "## Safety"] @@ -37117,7 +35397,6 @@ pub unsafe fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x1 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] #[doc = "## Safety"] @@ -37131,7 +35410,6 @@ pub unsafe fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t let x = transmute(vqtbx1(transmute(a), transmute(b), c)); x } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] #[doc = "## Safety"] @@ -37149,7 +35427,6 @@ pub unsafe fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t let ret_val: poly8x8_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] #[doc = "## Safety"] @@ -37163,7 +35440,6 @@ pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x1 let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); x } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] #[doc = "## Safety"] @@ -37185,7 +35461,6 @@ pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x1 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] #[doc = "## Safety"] @@ -37196,7 +35471,7 @@ pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x1 #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx2.v8i8" @@ -37205,7 +35480,6 @@ unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x } _vqtbx2(a, b, c, d.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] #[doc = "## Safety"] @@ -37216,7 +35490,7 @@ unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx2.v8i8" @@ -37230,7 +35504,6 @@ unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x let ret_val: int8x8_t = _vqtbx2(a, b, c, d.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] #[doc = "## Safety"] @@ -37241,7 +35514,7 @@ unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx2.v16i8" @@ -37250,7 +35523,6 @@ unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in } _vqtbx2q(a, b, c, d.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] #[doc = "## Safety"] @@ -37261,7 +35533,7 @@ unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx2.v16i8" @@ -37279,7 +35551,6 @@ unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] #[doc = "## Safety"] @@ -37292,7 +35563,6 @@ unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { vqtbx2(a, b.0, b.1, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] #[doc = "## Safety"] @@ -37319,7 +35589,6 @@ pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbx2(a, b.0, b.1, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] #[doc = "## Safety"] @@ -37332,7 +35601,6 @@ pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { vqtbx2q(a, b.0, b.1, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] #[doc = "## Safety"] @@ -37363,7 +35631,6 @@ pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[doc = "## Safety"] @@ -37376,7 +35643,6 @@ pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16 pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[doc = "## Safety"] @@ -37403,7 +35669,6 @@ pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_ let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[doc = "## Safety"] @@ -37416,7 +35681,6 @@ pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_ pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[doc = "## Safety"] @@ -37447,7 +35711,6 @@ pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[doc = "## Safety"] @@ -37460,7 +35723,6 @@ pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8 pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[doc = "## Safety"] @@ -37487,7 +35749,6 @@ pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_ let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] #[doc = "## Safety"] @@ -37500,7 +35761,6 @@ pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_ pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] #[doc = "## Safety"] @@ -37531,7 +35791,6 @@ pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] #[doc = "## Safety"] @@ -37542,7 +35801,7 @@ pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8 #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx3.v8i8" @@ -37551,7 +35810,6 @@ unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8 } _vqtbx3(a, b, c, d, e.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] #[doc = "## Safety"] @@ -37562,7 +35820,7 @@ unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8 #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx3.v8i8" @@ -37577,7 +35835,6 @@ unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8 let ret_val: int8x8_t = _vqtbx3(a, b, c, d, e.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] #[doc = "## Safety"] @@ -37594,7 +35851,7 @@ unsafe fn vqtbx3q( d: int8x16_t, e: uint8x16_t, ) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx3.v16i8" @@ -37609,7 +35866,6 @@ unsafe fn vqtbx3q( } _vqtbx3q(a, b, c, d, e.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] #[doc = "## Safety"] @@ -37626,7 +35882,7 @@ unsafe fn vqtbx3q( d: int8x16_t, e: uint8x16_t, ) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx3.v16i8" @@ -37651,7 +35907,6 @@ unsafe fn vqtbx3q( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] #[doc = "## Safety"] @@ -37664,7 +35919,6 @@ unsafe fn vqtbx3q( pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { vqtbx3(a, b.0, b.1, b.2, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] #[doc = "## Safety"] @@ -37696,7 +35950,6 @@ pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbx3(a, b.0, b.1, b.2, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] #[doc = "## Safety"] @@ -37709,7 +35962,6 @@ pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { vqtbx3q(a, b.0, b.1, b.2, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] #[doc = "## Safety"] @@ -37745,7 +35997,6 @@ pub unsafe fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] #[doc = "## Safety"] @@ -37764,7 +36015,6 @@ pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_ c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] #[doc = "## Safety"] @@ -37802,7 +36052,6 @@ pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_ )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[doc = "## Safety"] @@ -37821,7 +36070,6 @@ pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8 c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[doc = "## Safety"] @@ -37863,7 +36111,6 @@ pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[doc = "## Safety"] @@ -37882,7 +36129,6 @@ pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_ c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[doc = "## Safety"] @@ -37920,7 +36166,6 @@ pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_ )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[doc = "## Safety"] @@ -37939,7 +36184,6 @@ pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8 c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[doc = "## Safety"] @@ -37981,7 +36225,6 @@ pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] #[doc = "## Safety"] @@ -37999,7 +36242,7 @@ unsafe fn vqtbx4( e: int8x16_t, f: uint8x8_t, ) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx4.v8i8" @@ -38015,7 +36258,6 @@ unsafe fn vqtbx4( } _vqtbx4(a, b, c, d, e, f.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] #[doc = "## Safety"] @@ -38033,7 +36275,7 @@ unsafe fn vqtbx4( e: int8x16_t, f: uint8x8_t, ) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx4.v8i8" @@ -38056,7 +36298,6 @@ unsafe fn vqtbx4( let ret_val: int8x8_t = _vqtbx4(a, b, c, d, e, f.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] #[doc = "## Safety"] @@ -38074,7 +36315,7 @@ unsafe fn vqtbx4q( e: int8x16_t, f: uint8x16_t, ) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx4.v16i8" @@ -38090,7 +36331,6 @@ unsafe fn vqtbx4q( } _vqtbx4q(a, b, c, d, e, f.as_signed()) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] #[doc = "## Safety"] @@ -38108,7 +36348,7 @@ unsafe fn vqtbx4q( e: int8x16_t, f: uint8x16_t, ) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx4.v16i8" @@ -38135,7 +36375,6 @@ unsafe fn vqtbx4q( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] #[doc = "## Safety"] @@ -38148,7 +36387,6 @@ unsafe fn vqtbx4q( pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { vqtbx4(a, b.0, b.1, b.2, b.3, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] #[doc = "## Safety"] @@ -38185,7 +36423,6 @@ pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbx4(a, b.0, b.1, b.2, b.3, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] #[doc = "## Safety"] @@ -38198,7 +36435,6 @@ pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { pub unsafe fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { vqtbx4q(a, b.0, b.1, b.2, b.3, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] #[doc = "## Safety"] @@ -38239,7 +36475,6 @@ pub unsafe fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[doc = "## Safety"] @@ -38259,7 +36494,6 @@ pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_ c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[doc = "## Safety"] @@ -38303,7 +36537,6 @@ pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_ )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[doc = "## Safety"] @@ -38323,7 +36556,6 @@ pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8 c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[doc = "## Safety"] @@ -38371,7 +36603,6 @@ pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[doc = "## Safety"] @@ -38391,7 +36622,6 @@ pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_ c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[doc = "## Safety"] @@ -38435,7 +36665,6 @@ pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_ )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[doc = "## Safety"] @@ -38455,7 +36684,6 @@ pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8 c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[doc = "## Safety"] @@ -38503,7 +36731,6 @@ pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Rotate and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] #[doc = "## Safety"] @@ -38514,7 +36741,7 @@ pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8 #[cfg_attr(test, assert_instr(rax1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.rax1" @@ -38523,7 +36750,6 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Rotate and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] #[doc = "## Safety"] @@ -38534,7 +36760,7 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(rax1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.rax1" @@ -38546,7 +36772,6 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] #[doc = "## Safety"] @@ -38557,7 +36782,7 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v8i8" @@ -38566,7 +36791,6 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { } _vrbit_s8(a) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] #[doc = "## Safety"] @@ -38577,7 +36801,7 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v8i8" @@ -38588,7 +36812,6 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vrbit_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] #[doc = "## Safety"] @@ -38599,7 +36822,7 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v16i8" @@ -38608,7 +36831,6 @@ pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { } _vrbitq_s8(a) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] #[doc = "## Safety"] @@ -38619,7 +36841,7 @@ pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v16i8" @@ -38634,7 +36856,6 @@ pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] #[doc = "## Safety"] @@ -38647,7 +36868,6 @@ pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { transmute(vrbit_s8(transmute(a))) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] #[doc = "## Safety"] @@ -38662,7 +36882,6 @@ pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] #[doc = "## Safety"] @@ -38675,7 +36894,6 @@ pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vrbitq_s8(transmute(a))) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] #[doc = "## Safety"] @@ -38694,7 +36912,6 @@ pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] #[doc = "## Safety"] @@ -38707,7 +36924,6 @@ pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { transmute(vrbit_s8(transmute(a))) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] #[doc = "## Safety"] @@ -38722,7 +36938,6 @@ pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] #[doc = "## Safety"] @@ -38735,7 +36950,6 @@ pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { transmute(vrbitq_s8(transmute(a))) } - #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] #[doc = "## Safety"] @@ -38754,7 +36968,6 @@ pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"] #[doc = "## Safety"] @@ -38764,7 +36977,7 @@ pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.v1f64" @@ -38773,7 +36986,6 @@ pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { } _vrecpe_f64(a) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] #[doc = "## Safety"] @@ -38784,7 +36996,7 @@ pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.v2f64" @@ -38793,7 +37005,6 @@ pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { } _vrecpeq_f64(a) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] #[doc = "## Safety"] @@ -38804,7 +37015,7 @@ pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.v2f64" @@ -38815,7 +37026,6 @@ pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrecpeq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"] #[doc = "## Safety"] @@ -38825,7 +37035,7 @@ pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecped_f64(a: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.f64" @@ -38834,7 +37044,6 @@ pub unsafe fn vrecped_f64(a: f64) -> f64 { } _vrecped_f64(a) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"] #[doc = "## Safety"] @@ -38844,7 +37053,7 @@ pub unsafe fn vrecped_f64(a: f64) -> f64 { #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpes_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.f32" @@ -38853,7 +37062,6 @@ pub unsafe fn vrecpes_f32(a: f32) -> f32 { } _vrecpes_f32(a) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"] #[doc = "## Safety"] @@ -38863,7 +37071,7 @@ pub unsafe fn vrecpes_f32(a: f32) -> f32 { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.v1f64" @@ -38872,7 +37080,6 @@ pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vrecps_f64(a, b) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] #[doc = "## Safety"] @@ -38883,7 +37090,7 @@ pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.v2f64" @@ -38892,7 +37099,6 @@ pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vrecpsq_f64(a, b) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] #[doc = "## Safety"] @@ -38903,7 +37109,7 @@ pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.v2f64" @@ -38915,7 +37121,6 @@ pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrecpsq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"] #[doc = "## Safety"] @@ -38925,7 +37130,7 @@ pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.f64" @@ -38934,7 +37139,6 @@ pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { } _vrecpsd_f64(a, b) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"] #[doc = "## Safety"] @@ -38944,7 +37148,7 @@ pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.f32" @@ -38953,7 +37157,6 @@ pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { } _vrecpss_f32(a, b) } - #[doc = "Floating-point reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"] #[doc = "## Safety"] @@ -38963,7 +37166,7 @@ pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { #[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpxd_f64(a: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpx.f64" @@ -38972,7 +37175,6 @@ pub unsafe fn vrecpxd_f64(a: f64) -> f64 { } _vrecpxd_f64(a) } - #[doc = "Floating-point reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"] #[doc = "## Safety"] @@ -38982,7 +37184,7 @@ pub unsafe fn vrecpxd_f64(a: f64) -> f64 { #[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrecpxs_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpx.f32" @@ -38991,7 +37193,6 @@ pub unsafe fn vrecpxs_f32(a: f32) -> f32 { } _vrecpxs_f32(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[doc = "## Safety"] @@ -39004,7 +37205,6 @@ pub unsafe fn vrecpxs_f32(a: f32) -> f32 { pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[doc = "## Safety"] @@ -39018,7 +37218,6 @@ pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] #[doc = "## Safety"] @@ -39031,7 +37230,6 @@ pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] #[doc = "## Safety"] @@ -39045,7 +37243,6 @@ pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] #[doc = "## Safety"] @@ -39058,7 +37255,6 @@ pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] #[doc = "## Safety"] @@ -39072,7 +37268,6 @@ pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] #[doc = "## Safety"] @@ -39085,7 +37280,6 @@ pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] #[doc = "## Safety"] @@ -39100,7 +37294,6 @@ pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] #[doc = "## Safety"] @@ -39113,7 +37306,6 @@ pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] #[doc = "## Safety"] @@ -39128,7 +37320,6 @@ pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] #[doc = "## Safety"] @@ -39141,7 +37332,6 @@ pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] #[doc = "## Safety"] @@ -39155,7 +37345,6 @@ pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] #[doc = "## Safety"] @@ -39168,7 +37357,6 @@ pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] #[doc = "## Safety"] @@ -39182,7 +37370,6 @@ pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] #[doc = "## Safety"] @@ -39195,7 +37382,6 @@ pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] #[doc = "## Safety"] @@ -39209,7 +37395,6 @@ pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] #[doc = "## Safety"] @@ -39222,7 +37407,6 @@ pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] #[doc = "## Safety"] @@ -39236,7 +37420,6 @@ pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] #[doc = "## Safety"] @@ -39248,7 +37431,6 @@ pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] #[doc = "## Safety"] @@ -39261,7 +37443,6 @@ pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] #[doc = "## Safety"] @@ -39275,7 +37456,6 @@ pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] #[doc = "## Safety"] @@ -39288,7 +37468,6 @@ pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] #[doc = "## Safety"] @@ -39302,7 +37481,6 @@ pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] #[doc = "## Safety"] @@ -39315,7 +37493,6 @@ pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] #[doc = "## Safety"] @@ -39329,7 +37506,6 @@ pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] #[doc = "## Safety"] @@ -39341,7 +37517,6 @@ pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] #[doc = "## Safety"] @@ -39354,7 +37529,6 @@ pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] #[doc = "## Safety"] @@ -39368,7 +37542,6 @@ pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] #[doc = "## Safety"] @@ -39381,7 +37554,6 @@ pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] #[doc = "## Safety"] @@ -39395,7 +37567,6 @@ pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] #[doc = "## Safety"] @@ -39407,7 +37578,6 @@ pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] #[doc = "## Safety"] @@ -39420,7 +37590,6 @@ pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] #[doc = "## Safety"] @@ -39434,7 +37603,6 @@ pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] #[doc = "## Safety"] @@ -39447,7 +37615,6 @@ pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] #[doc = "## Safety"] @@ -39462,7 +37629,6 @@ pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] #[doc = "## Safety"] @@ -39475,7 +37641,6 @@ pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] #[doc = "## Safety"] @@ -39494,7 +37659,6 @@ pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] #[doc = "## Safety"] @@ -39507,7 +37671,6 @@ pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] #[doc = "## Safety"] @@ -39522,7 +37685,6 @@ pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] #[doc = "## Safety"] @@ -39535,7 +37697,6 @@ pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] #[doc = "## Safety"] @@ -39550,7 +37711,6 @@ pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[doc = "## Safety"] @@ -39563,7 +37723,6 @@ pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[doc = "## Safety"] @@ -39578,7 +37737,6 @@ pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] #[doc = "## Safety"] @@ -39591,7 +37749,6 @@ pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] #[doc = "## Safety"] @@ -39610,7 +37767,6 @@ pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] #[doc = "## Safety"] @@ -39623,7 +37779,6 @@ pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] #[doc = "## Safety"] @@ -39638,7 +37793,6 @@ pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] #[doc = "## Safety"] @@ -39651,7 +37805,6 @@ pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] #[doc = "## Safety"] @@ -39666,7 +37819,6 @@ pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] #[doc = "## Safety"] @@ -39679,7 +37831,6 @@ pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] #[doc = "## Safety"] @@ -39694,7 +37845,6 @@ pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] #[doc = "## Safety"] @@ -39707,7 +37857,6 @@ pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] #[doc = "## Safety"] @@ -39726,7 +37875,6 @@ pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] #[doc = "## Safety"] @@ -39739,7 +37887,6 @@ pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] #[doc = "## Safety"] @@ -39754,7 +37901,6 @@ pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] #[doc = "## Safety"] @@ -39767,7 +37913,6 @@ pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] #[doc = "## Safety"] @@ -39782,7 +37927,6 @@ pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] #[doc = "## Safety"] @@ -39795,7 +37939,6 @@ pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] #[doc = "## Safety"] @@ -39809,7 +37952,6 @@ pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] #[doc = "## Safety"] @@ -39822,7 +37964,6 @@ pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] #[doc = "## Safety"] @@ -39837,7 +37978,6 @@ pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] #[doc = "## Safety"] @@ -39850,7 +37990,6 @@ pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] #[doc = "## Safety"] @@ -39864,7 +38003,6 @@ pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] #[doc = "## Safety"] @@ -39877,7 +38015,6 @@ pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] #[doc = "## Safety"] @@ -39892,7 +38029,6 @@ pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] #[doc = "## Safety"] @@ -39905,7 +38041,6 @@ pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] #[doc = "## Safety"] @@ -39919,7 +38054,6 @@ pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] #[doc = "## Safety"] @@ -39932,7 +38066,6 @@ pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] #[doc = "## Safety"] @@ -39947,7 +38080,6 @@ pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] #[doc = "## Safety"] @@ -39959,7 +38091,6 @@ pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] #[doc = "## Safety"] @@ -39971,7 +38102,6 @@ pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] #[doc = "## Safety"] @@ -39984,7 +38114,6 @@ pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] #[doc = "## Safety"] @@ -39999,7 +38128,6 @@ pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] #[doc = "## Safety"] @@ -40012,7 +38140,6 @@ pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] #[doc = "## Safety"] @@ -40027,7 +38154,6 @@ pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] #[doc = "## Safety"] @@ -40040,7 +38166,6 @@ pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] #[doc = "## Safety"] @@ -40054,7 +38179,6 @@ pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] #[doc = "## Safety"] @@ -40067,7 +38191,6 @@ pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] #[doc = "## Safety"] @@ -40082,7 +38205,6 @@ pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] #[doc = "## Safety"] @@ -40095,7 +38217,6 @@ pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] #[doc = "## Safety"] @@ -40109,7 +38230,6 @@ pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] #[doc = "## Safety"] @@ -40122,7 +38242,6 @@ pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] #[doc = "## Safety"] @@ -40137,7 +38256,6 @@ pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] #[doc = "## Safety"] @@ -40150,7 +38268,6 @@ pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] #[doc = "## Safety"] @@ -40164,7 +38281,6 @@ pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] #[doc = "## Safety"] @@ -40177,7 +38293,6 @@ pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] #[doc = "## Safety"] @@ -40192,7 +38307,6 @@ pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] #[doc = "## Safety"] @@ -40204,7 +38318,6 @@ pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] #[doc = "## Safety"] @@ -40216,7 +38329,6 @@ pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] #[doc = "## Safety"] @@ -40229,7 +38341,6 @@ pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] #[doc = "## Safety"] @@ -40244,7 +38355,6 @@ pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] #[doc = "## Safety"] @@ -40257,7 +38367,6 @@ pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] #[doc = "## Safety"] @@ -40272,7 +38381,6 @@ pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] #[doc = "## Safety"] @@ -40285,7 +38393,6 @@ pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] #[doc = "## Safety"] @@ -40299,7 +38406,6 @@ pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] #[doc = "## Safety"] @@ -40312,7 +38418,6 @@ pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] #[doc = "## Safety"] @@ -40327,7 +38432,6 @@ pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] #[doc = "## Safety"] @@ -40340,7 +38444,6 @@ pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] #[doc = "## Safety"] @@ -40354,7 +38457,6 @@ pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] #[doc = "## Safety"] @@ -40367,7 +38469,6 @@ pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] #[doc = "## Safety"] @@ -40382,7 +38483,6 @@ pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] #[doc = "## Safety"] @@ -40395,7 +38495,6 @@ pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] #[doc = "## Safety"] @@ -40409,7 +38508,6 @@ pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"] #[doc = "## Safety"] @@ -40421,7 +38519,6 @@ pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"] #[doc = "## Safety"] @@ -40433,7 +38530,6 @@ pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"] #[doc = "## Safety"] @@ -40445,7 +38541,6 @@ pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] #[doc = "## Safety"] @@ -40458,7 +38553,6 @@ pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] #[doc = "## Safety"] @@ -40473,7 +38567,6 @@ pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] #[doc = "## Safety"] @@ -40486,7 +38579,6 @@ pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] #[doc = "## Safety"] @@ -40501,7 +38593,6 @@ pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] #[doc = "## Safety"] @@ -40514,7 +38605,6 @@ pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] #[doc = "## Safety"] @@ -40529,7 +38619,6 @@ pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] #[doc = "## Safety"] @@ -40542,7 +38631,6 @@ pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] #[doc = "## Safety"] @@ -40557,7 +38645,6 @@ pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] #[doc = "## Safety"] @@ -40568,7 +38655,7 @@ pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f32" @@ -40577,7 +38664,6 @@ pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { } _vrnd32x_f32(a) } - #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] #[doc = "## Safety"] @@ -40588,7 +38674,7 @@ pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f32" @@ -40599,7 +38685,6 @@ pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrnd32x_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] #[doc = "## Safety"] @@ -40610,7 +38695,7 @@ pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v4f32" @@ -40619,7 +38704,6 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { } _vrnd32xq_f32(a) } - #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] #[doc = "## Safety"] @@ -40630,7 +38714,7 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v4f32" @@ -40641,7 +38725,6 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrnd32xq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] #[doc = "## Safety"] @@ -40652,7 +38735,7 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f64" @@ -40661,7 +38744,6 @@ pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { } _vrnd32xq_f64(a) } - #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] #[doc = "## Safety"] @@ -40672,7 +38754,7 @@ pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f64" @@ -40683,7 +38765,6 @@ pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrnd32xq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"] #[doc = "## Safety"] @@ -40693,7 +38774,7 @@ pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint32x.f64" @@ -40702,7 +38783,6 @@ pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { } transmute(_vrnd32x_f64(simd_extract!(a, 0))) } - #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] #[doc = "## Safety"] @@ -40713,7 +38793,7 @@ pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f32" @@ -40722,7 +38802,6 @@ pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { } _vrnd32z_f32(a) } - #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] #[doc = "## Safety"] @@ -40733,7 +38812,7 @@ pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f32" @@ -40744,7 +38823,6 @@ pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrnd32z_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] #[doc = "## Safety"] @@ -40755,7 +38833,7 @@ pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v4f32" @@ -40764,7 +38842,6 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { } _vrnd32zq_f32(a) } - #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] #[doc = "## Safety"] @@ -40775,7 +38852,7 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v4f32" @@ -40786,7 +38863,6 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrnd32zq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] #[doc = "## Safety"] @@ -40797,7 +38873,7 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f64" @@ -40806,7 +38882,6 @@ pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { } _vrnd32zq_f64(a) } - #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] #[doc = "## Safety"] @@ -40817,7 +38892,7 @@ pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f64" @@ -40828,7 +38903,6 @@ pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrnd32zq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"] #[doc = "## Safety"] @@ -40838,7 +38912,7 @@ pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint32z.f64" @@ -40847,7 +38921,6 @@ pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { } transmute(_vrnd32z_f64(simd_extract!(a, 0))) } - #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] #[doc = "## Safety"] @@ -40858,7 +38931,7 @@ pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f32" @@ -40867,7 +38940,6 @@ pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { } _vrnd64x_f32(a) } - #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] #[doc = "## Safety"] @@ -40878,7 +38950,7 @@ pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f32" @@ -40889,7 +38961,6 @@ pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrnd64x_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] #[doc = "## Safety"] @@ -40900,7 +38971,7 @@ pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v4f32" @@ -40909,7 +38980,6 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { } _vrnd64xq_f32(a) } - #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] #[doc = "## Safety"] @@ -40920,7 +38990,7 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v4f32" @@ -40931,7 +39001,6 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrnd64xq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] #[doc = "## Safety"] @@ -40942,7 +39011,7 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f64" @@ -40951,7 +39020,6 @@ pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { } _vrnd64xq_f64(a) } - #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] #[doc = "## Safety"] @@ -40962,7 +39030,7 @@ pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f64" @@ -40973,7 +39041,6 @@ pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrnd64xq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"] #[doc = "## Safety"] @@ -40983,7 +39050,7 @@ pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint64x.f64" @@ -40992,7 +39059,6 @@ pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { } transmute(_vrnd64x_f64(simd_extract!(a, 0))) } - #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] #[doc = "## Safety"] @@ -41003,7 +39069,7 @@ pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f32" @@ -41012,7 +39078,6 @@ pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { } _vrnd64z_f32(a) } - #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] #[doc = "## Safety"] @@ -41023,7 +39088,7 @@ pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f32" @@ -41034,7 +39099,6 @@ pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrnd64z_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] #[doc = "## Safety"] @@ -41045,7 +39109,7 @@ pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v4f32" @@ -41054,7 +39118,6 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { } _vrnd64zq_f32(a) } - #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] #[doc = "## Safety"] @@ -41065,7 +39128,7 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v4f32" @@ -41076,7 +39139,6 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrnd64zq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] #[doc = "## Safety"] @@ -41087,7 +39149,7 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f64" @@ -41096,7 +39158,6 @@ pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { } _vrnd64zq_f64(a) } - #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] #[doc = "## Safety"] @@ -41107,7 +39168,7 @@ pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f64" @@ -41118,7 +39179,6 @@ pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrnd64zq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"] #[doc = "## Safety"] @@ -41128,7 +39188,7 @@ pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint64z.f64" @@ -41137,7 +39197,6 @@ pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { } transmute(_vrnd64z_f64(simd_extract!(a, 0))) } - #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] #[doc = "## Safety"] @@ -41148,7 +39207,7 @@ pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f32" @@ -41157,7 +39216,6 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { } _vrnd_f32(a) } - #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] #[doc = "## Safety"] @@ -41168,7 +39226,7 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f32" @@ -41179,7 +39237,6 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrnd_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] #[doc = "## Safety"] @@ -41190,7 +39247,7 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v4f32" @@ -41199,7 +39256,6 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { } _vrndq_f32(a) } - #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] #[doc = "## Safety"] @@ -41210,7 +39266,7 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v4f32" @@ -41221,7 +39277,6 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrndq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"] #[doc = "## Safety"] @@ -41231,7 +39286,7 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v1f64" @@ -41240,7 +39295,6 @@ pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { } _vrnd_f64(a) } - #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] #[doc = "## Safety"] @@ -41251,7 +39305,7 @@ pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f64" @@ -41260,7 +39314,6 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { } _vrndq_f64(a) } - #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] #[doc = "## Safety"] @@ -41271,7 +39324,7 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f64" @@ -41282,7 +39335,6 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrndq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] #[doc = "## Safety"] @@ -41293,7 +39345,7 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f32" @@ -41302,7 +39354,6 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { } _vrnda_f32(a) } - #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] #[doc = "## Safety"] @@ -41313,7 +39364,7 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f32" @@ -41324,7 +39375,6 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrnda_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] #[doc = "## Safety"] @@ -41335,7 +39385,7 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v4f32" @@ -41344,7 +39394,6 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { } _vrndaq_f32(a) } - #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] #[doc = "## Safety"] @@ -41355,7 +39404,7 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v4f32" @@ -41366,7 +39415,6 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrndaq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"] #[doc = "## Safety"] @@ -41376,7 +39424,7 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v1f64" @@ -41385,7 +39433,6 @@ pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { } _vrnda_f64(a) } - #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] #[doc = "## Safety"] @@ -41396,7 +39443,7 @@ pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f64" @@ -41405,7 +39452,6 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { } _vrndaq_f64(a) } - #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] #[doc = "## Safety"] @@ -41416,7 +39462,7 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f64" @@ -41427,7 +39473,6 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrndaq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] #[doc = "## Safety"] @@ -41438,7 +39483,7 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f32" @@ -41447,7 +39492,6 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { } _vrndi_f32(a) } - #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] #[doc = "## Safety"] @@ -41458,7 +39502,7 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f32" @@ -41469,7 +39513,6 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrndi_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] #[doc = "## Safety"] @@ -41480,7 +39523,7 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v4f32" @@ -41489,7 +39532,6 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { } _vrndiq_f32(a) } - #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] #[doc = "## Safety"] @@ -41500,7 +39542,7 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v4f32" @@ -41511,7 +39553,6 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrndiq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"] #[doc = "## Safety"] @@ -41521,7 +39562,7 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v1f64" @@ -41530,7 +39571,6 @@ pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { } _vrndi_f64(a) } - #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] #[doc = "## Safety"] @@ -41541,7 +39581,7 @@ pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f64" @@ -41550,7 +39590,6 @@ pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { } _vrndiq_f64(a) } - #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] #[doc = "## Safety"] @@ -41561,7 +39600,7 @@ pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f64" @@ -41572,7 +39611,6 @@ pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrndiq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] #[doc = "## Safety"] @@ -41583,7 +39621,7 @@ pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f32" @@ -41592,7 +39630,6 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { } _vrndm_f32(a) } - #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] #[doc = "## Safety"] @@ -41603,7 +39640,7 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f32" @@ -41614,7 +39651,6 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrndm_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] #[doc = "## Safety"] @@ -41625,7 +39661,7 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v4f32" @@ -41634,7 +39670,6 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { } _vrndmq_f32(a) } - #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] #[doc = "## Safety"] @@ -41645,7 +39680,7 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v4f32" @@ -41656,7 +39691,6 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrndmq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"] #[doc = "## Safety"] @@ -41666,7 +39700,7 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v1f64" @@ -41675,7 +39709,6 @@ pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { } _vrndm_f64(a) } - #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] #[doc = "## Safety"] @@ -41686,7 +39719,7 @@ pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f64" @@ -41695,7 +39728,6 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { } _vrndmq_f64(a) } - #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] #[doc = "## Safety"] @@ -41706,7 +39738,7 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f64" @@ -41717,7 +39749,6 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrndmq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"] #[doc = "## Safety"] @@ -41727,7 +39758,7 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v1f64" @@ -41736,7 +39767,6 @@ pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { } _vrndn_f64(a) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] #[doc = "## Safety"] @@ -41747,7 +39777,7 @@ pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v2f64" @@ -41756,7 +39786,6 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { } _vrndnq_f64(a) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] #[doc = "## Safety"] @@ -41767,7 +39796,7 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v2f64" @@ -41778,7 +39807,6 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrndnq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"] #[doc = "## Safety"] @@ -41788,7 +39816,7 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] pub unsafe fn vrndns_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.roundeven.f32" @@ -41797,7 +39825,6 @@ pub unsafe fn vrndns_f32(a: f32) -> f32 { } _vrndns_f32(a) } - #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] #[doc = "## Safety"] @@ -41808,7 +39835,7 @@ pub unsafe fn vrndns_f32(a: f32) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f32" @@ -41817,7 +39844,6 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { } _vrndp_f32(a) } - #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] #[doc = "## Safety"] @@ -41828,7 +39854,7 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f32" @@ -41839,7 +39865,6 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrndp_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] #[doc = "## Safety"] @@ -41850,7 +39875,7 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v4f32" @@ -41859,7 +39884,6 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { } _vrndpq_f32(a) } - #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] #[doc = "## Safety"] @@ -41870,7 +39894,7 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v4f32" @@ -41881,7 +39905,6 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrndpq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"] #[doc = "## Safety"] @@ -41891,7 +39914,7 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v1f64" @@ -41900,7 +39923,6 @@ pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { } _vrndp_f64(a) } - #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] #[doc = "## Safety"] @@ -41911,7 +39933,7 @@ pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f64" @@ -41920,7 +39942,6 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { } _vrndpq_f64(a) } - #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] #[doc = "## Safety"] @@ -41931,7 +39952,7 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f64" @@ -41942,7 +39963,6 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrndpq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] #[doc = "## Safety"] @@ -41953,7 +39973,7 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f32" @@ -41962,7 +39982,6 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { } _vrndx_f32(a) } - #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] #[doc = "## Safety"] @@ -41973,7 +39992,7 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f32" @@ -41984,7 +40003,6 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrndx_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] #[doc = "## Safety"] @@ -41995,7 +40013,7 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v4f32" @@ -42004,7 +40022,6 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { } _vrndxq_f32(a) } - #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] #[doc = "## Safety"] @@ -42015,7 +40032,7 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v4f32" @@ -42026,7 +40043,6 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrndxq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"] #[doc = "## Safety"] @@ -42036,7 +40052,7 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v1f64" @@ -42045,7 +40061,6 @@ pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { } _vrndx_f64(a) } - #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] #[doc = "## Safety"] @@ -42056,7 +40071,7 @@ pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f64" @@ -42065,7 +40080,6 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { } _vrndxq_f64(a) } - #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] #[doc = "## Safety"] @@ -42076,7 +40090,7 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f64" @@ -42087,7 +40101,6 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrndxq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"] #[doc = "## Safety"] @@ -42097,7 +40110,7 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(srshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srshl.i64" @@ -42106,7 +40119,6 @@ pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { } _vrshld_s64(a, b) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"] #[doc = "## Safety"] @@ -42116,7 +40128,7 @@ pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { #[cfg_attr(test, assert_instr(urshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.i64" @@ -42125,7 +40137,6 @@ pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { } _vrshld_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] #[doc = "## Safety"] @@ -42139,7 +40150,6 @@ pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { static_assert!(N >= 1 && N <= 64); vrshld_s64(a, -N as i64) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"] #[doc = "## Safety"] @@ -42153,7 +40163,6 @@ pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { static_assert!(N >= 1 && N <= 64); vrshld_u64(a, -N as i64) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] #[doc = "## Safety"] @@ -42172,7 +40181,6 @@ pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] #[doc = "## Safety"] @@ -42198,7 +40206,6 @@ pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] #[doc = "## Safety"] @@ -42213,7 +40220,6 @@ pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] #[doc = "## Safety"] @@ -42231,7 +40237,6 @@ pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int let ret_val: int16x8_t = simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] #[doc = "## Safety"] @@ -42246,7 +40251,6 @@ pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] #[doc = "## Safety"] @@ -42264,7 +40268,6 @@ pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int let ret_val: int32x4_t = simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] #[doc = "## Safety"] @@ -42283,7 +40286,6 @@ pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] #[doc = "## Safety"] @@ -42309,7 +40311,6 @@ pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] #[doc = "## Safety"] @@ -42324,7 +40325,6 @@ pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] #[doc = "## Safety"] @@ -42342,7 +40342,6 @@ pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u let ret_val: uint16x8_t = simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] #[doc = "## Safety"] @@ -42357,7 +40356,6 @@ pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] #[doc = "## Safety"] @@ -42375,7 +40373,6 @@ pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u let ret_val: uint32x4_t = simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] #[doc = "## Safety"] @@ -42385,7 +40382,7 @@ pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.v1f64" @@ -42394,7 +40391,6 @@ pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { } _vrsqrte_f64(a) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] #[doc = "## Safety"] @@ -42405,7 +40401,7 @@ pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.v2f64" @@ -42414,7 +40410,6 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { } _vrsqrteq_f64(a) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] #[doc = "## Safety"] @@ -42425,7 +40420,7 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.v2f64" @@ -42436,7 +40431,6 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrsqrteq_f64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] #[doc = "## Safety"] @@ -42446,7 +40440,7 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrted_f64(a: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.f64" @@ -42455,7 +40449,6 @@ pub unsafe fn vrsqrted_f64(a: f64) -> f64 { } _vrsqrted_f64(a) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] #[doc = "## Safety"] @@ -42465,7 +40458,7 @@ pub unsafe fn vrsqrted_f64(a: f64) -> f64 { #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.f32" @@ -42474,7 +40467,6 @@ pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { } _vrsqrtes_f32(a) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"] #[doc = "## Safety"] @@ -42484,7 +40476,7 @@ pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.v1f64" @@ -42493,7 +40485,6 @@ pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } _vrsqrts_f64(a, b) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] #[doc = "## Safety"] @@ -42504,7 +40495,7 @@ pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.v2f64" @@ -42513,7 +40504,6 @@ pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vrsqrtsq_f64(a, b) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] #[doc = "## Safety"] @@ -42524,7 +40514,7 @@ pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.v2f64" @@ -42536,7 +40526,6 @@ pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = _vrsqrtsq_f64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"] #[doc = "## Safety"] @@ -42546,7 +40535,7 @@ pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.f64" @@ -42555,7 +40544,6 @@ pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { } _vrsqrtsd_f64(a, b) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"] #[doc = "## Safety"] @@ -42565,7 +40553,7 @@ pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.f32" @@ -42574,7 +40562,6 @@ pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { } _vrsqrtss_f32(a, b) } - #[doc = "Signed rounding shift right and accumulate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"] #[doc = "## Safety"] @@ -42589,7 +40576,6 @@ pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { let b: i64 = vrshrd_n_s64::(b); a.wrapping_add(b) } - #[doc = "Unsigned rounding shift right and accumulate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"] #[doc = "## Safety"] @@ -42604,7 +40590,6 @@ pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { let b: u64 = vrshrd_n_u64::(b); a.wrapping_add(b) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] #[doc = "## Safety"] @@ -42618,7 +40603,6 @@ pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x let x: int8x8_t = vrsubhn_s16(b, c); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] #[doc = "## Safety"] @@ -42641,7 +40625,6 @@ pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] #[doc = "## Safety"] @@ -42655,7 +40638,6 @@ pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int1 let x: int16x4_t = vrsubhn_s32(b, c); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] #[doc = "## Safety"] @@ -42673,7 +40655,6 @@ pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int1 let ret_val: int16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] #[doc = "## Safety"] @@ -42687,7 +40668,6 @@ pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int3 let x: int32x2_t = vrsubhn_s64(b, c); simd_shuffle!(a, x, [0, 1, 2, 3]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] #[doc = "## Safety"] @@ -42705,7 +40685,6 @@ pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int3 let ret_val: int32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] #[doc = "## Safety"] @@ -42719,7 +40698,6 @@ pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> ui let x: uint8x8_t = vrsubhn_u16(b, c); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] #[doc = "## Safety"] @@ -42742,7 +40720,6 @@ pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] #[doc = "## Safety"] @@ -42756,7 +40733,6 @@ pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> u let x: uint16x4_t = vrsubhn_u32(b, c); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] #[doc = "## Safety"] @@ -42774,7 +40750,6 @@ pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> u let ret_val: uint16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] #[doc = "## Safety"] @@ -42788,7 +40763,6 @@ pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u let x: uint32x2_t = vrsubhn_u64(b, c); simd_shuffle!(a, x, [0, 1, 2, 3]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] #[doc = "## Safety"] @@ -42806,7 +40780,6 @@ pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u let ret_val: uint32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"] #[doc = "## Safety"] @@ -42820,7 +40793,6 @@ pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x static_assert!(LANE == 0); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] #[doc = "## Safety"] @@ -42835,7 +40807,6 @@ pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64 static_assert_uimm_bits!(LANE, 1); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] #[doc = "## Safety"] @@ -42852,7 +40823,6 @@ pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64 let ret_val: float64x2_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "SHA512 hash update part 2"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] #[doc = "## Safety"] @@ -42863,7 +40833,7 @@ pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64 #[cfg_attr(test, assert_instr(sha512h2))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h2" @@ -42872,7 +40842,6 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin } _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "SHA512 hash update part 2"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] #[doc = "## Safety"] @@ -42883,7 +40852,7 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin #[cfg_attr(test, assert_instr(sha512h2))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h2" @@ -42897,7 +40866,6 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "SHA512 hash update part 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] #[doc = "## Safety"] @@ -42908,7 +40876,7 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin #[cfg_attr(test, assert_instr(sha512h))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h" @@ -42917,7 +40885,6 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint } _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "SHA512 hash update part 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] #[doc = "## Safety"] @@ -42928,7 +40895,7 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint #[cfg_attr(test, assert_instr(sha512h))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h" @@ -42942,7 +40909,6 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "SHA512 schedule update 0"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] #[doc = "## Safety"] @@ -42953,7 +40919,7 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint #[cfg_attr(test, assert_instr(sha512su0))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su0" @@ -42962,7 +40928,6 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "SHA512 schedule update 0"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] #[doc = "## Safety"] @@ -42973,7 +40938,7 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(sha512su0))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su0" @@ -42985,7 +40950,6 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "SHA512 schedule update 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] #[doc = "## Safety"] @@ -42996,7 +40960,7 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(sha512su1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su1" @@ -43005,7 +40969,6 @@ pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> ui } _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "SHA512 schedule update 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] #[doc = "## Safety"] @@ -43016,7 +40979,7 @@ pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> ui #[cfg_attr(test, assert_instr(sha512su1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su1" @@ -43030,7 +40993,6 @@ pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> ui _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"] #[doc = "## Safety"] @@ -43042,7 +41004,6 @@ pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> ui pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { transmute(vshl_s64(transmute(a), transmute(b))) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"] #[doc = "## Safety"] @@ -43054,7 +41015,6 @@ pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { transmute(vshl_u64(transmute(a), transmute(b))) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] #[doc = "## Safety"] @@ -43070,7 +41030,6 @@ pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vshll_n_s8::(b) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] #[doc = "## Safety"] @@ -43088,7 +41047,6 @@ pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = vshll_n_s8::(b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] #[doc = "## Safety"] @@ -43104,7 +41062,6 @@ pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); vshll_n_s16::(b) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] #[doc = "## Safety"] @@ -43122,7 +41079,6 @@ pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = vshll_n_s16::(b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] #[doc = "## Safety"] @@ -43138,7 +41094,6 @@ pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); vshll_n_s32::(b) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] #[doc = "## Safety"] @@ -43156,7 +41111,6 @@ pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = vshll_n_s32::(b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] #[doc = "## Safety"] @@ -43172,7 +41126,6 @@ pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vshll_n_u8::(b) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] #[doc = "## Safety"] @@ -43190,7 +41143,6 @@ pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = vshll_n_u8::(b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] #[doc = "## Safety"] @@ -43206,7 +41158,6 @@ pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); vshll_n_u16::(b) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] #[doc = "## Safety"] @@ -43224,7 +41175,6 @@ pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = vshll_n_u16::(b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] #[doc = "## Safety"] @@ -43240,7 +41190,6 @@ pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); vshll_n_u32::(b) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] #[doc = "## Safety"] @@ -43258,7 +41207,6 @@ pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = vshll_n_u32::(b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] #[doc = "## Safety"] @@ -43277,7 +41225,6 @@ pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] #[doc = "## Safety"] @@ -43303,7 +41250,6 @@ pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] #[doc = "## Safety"] @@ -43318,7 +41264,6 @@ pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int1 static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] #[doc = "## Safety"] @@ -43336,7 +41281,6 @@ pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int1 let ret_val: int16x8_t = simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] #[doc = "## Safety"] @@ -43351,7 +41295,6 @@ pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int3 static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] #[doc = "## Safety"] @@ -43369,7 +41312,6 @@ pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int3 let ret_val: int32x4_t = simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] #[doc = "## Safety"] @@ -43388,7 +41330,6 @@ pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uin [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] #[doc = "## Safety"] @@ -43414,7 +41355,6 @@ pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uin [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] #[doc = "## Safety"] @@ -43429,7 +41369,6 @@ pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> ui static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] #[doc = "## Safety"] @@ -43447,7 +41386,6 @@ pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> ui let ret_val: uint16x8_t = simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] #[doc = "## Safety"] @@ -43462,7 +41400,6 @@ pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> ui static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] #[doc = "## Safety"] @@ -43480,7 +41417,6 @@ pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> ui let ret_val: uint32x4_t = simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[doc = "## Safety"] @@ -43493,7 +41429,7 @@ pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> ui #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v8i8" @@ -43502,7 +41438,6 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vsli_n_s8(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[doc = "## Safety"] @@ -43515,7 +41450,7 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v8i8" @@ -43527,7 +41462,6 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vsli_n_s8(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[doc = "## Safety"] @@ -43540,7 +41474,7 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v16i8" @@ -43549,7 +41483,6 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t } _vsliq_n_s8(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[doc = "## Safety"] @@ -43562,7 +41495,7 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v16i8" @@ -43578,7 +41511,6 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[doc = "## Safety"] @@ -43591,7 +41523,7 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v4i16" @@ -43600,7 +41532,6 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t } _vsli_n_s16(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[doc = "## Safety"] @@ -43613,7 +41544,7 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v4i16" @@ -43625,7 +41556,6 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t let ret_val: int16x4_t = _vsli_n_s16(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[doc = "## Safety"] @@ -43638,7 +41568,7 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v8i16" @@ -43647,7 +41577,6 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t } _vsliq_n_s16(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[doc = "## Safety"] @@ -43660,7 +41589,7 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v8i16" @@ -43672,7 +41601,6 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t let ret_val: int16x8_t = _vsliq_n_s16(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] #[doc = "## Safety"] @@ -43685,7 +41613,7 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 0 && N <= 31); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v2i32" @@ -43694,7 +41622,6 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t } _vsli_n_s32(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] #[doc = "## Safety"] @@ -43707,7 +41634,7 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 0 && N <= 31); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v2i32" @@ -43719,7 +41646,6 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t let ret_val: int32x2_t = _vsli_n_s32(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] #[doc = "## Safety"] @@ -43732,7 +41658,7 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 0 && N <= 31); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v4i32" @@ -43741,7 +41667,6 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t } _vsliq_n_s32(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] #[doc = "## Safety"] @@ -43754,7 +41679,7 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 0 && N <= 31); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v4i32" @@ -43766,7 +41691,6 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t let ret_val: int32x4_t = _vsliq_n_s32(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] #[doc = "## Safety"] @@ -43778,7 +41702,7 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N >= 0 && N <= 63); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v1i64" @@ -43787,7 +41711,6 @@ pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t } _vsli_n_s64(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] #[doc = "## Safety"] @@ -43800,7 +41723,7 @@ pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N >= 0 && N <= 63); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v2i64" @@ -43809,7 +41732,6 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t } _vsliq_n_s64(a, b, N) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] #[doc = "## Safety"] @@ -43822,7 +41744,7 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N >= 0 && N <= 63); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsli.v2i64" @@ -43834,7 +41756,6 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t let ret_val: int64x2_t = _vsliq_n_s64(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] #[doc = "## Safety"] @@ -43849,7 +41770,6 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); transmute(vsli_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] #[doc = "## Safety"] @@ -43867,7 +41787,6 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vsli_n_s8::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] #[doc = "## Safety"] @@ -43882,7 +41801,6 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 static_assert_uimm_bits!(N, 3); transmute(vsliq_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] #[doc = "## Safety"] @@ -43904,7 +41822,6 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] #[doc = "## Safety"] @@ -43919,7 +41836,6 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 static_assert_uimm_bits!(N, 4); transmute(vsli_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] #[doc = "## Safety"] @@ -43937,7 +41853,6 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 let ret_val: uint16x4_t = transmute(vsli_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] #[doc = "## Safety"] @@ -43952,7 +41867,6 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x static_assert_uimm_bits!(N, 4); transmute(vsliq_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] #[doc = "## Safety"] @@ -43970,7 +41884,6 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x let ret_val: uint16x8_t = transmute(vsliq_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] #[doc = "## Safety"] @@ -43985,7 +41898,6 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 static_assert!(N >= 0 && N <= 31); transmute(vsli_n_s32::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] #[doc = "## Safety"] @@ -44003,7 +41915,6 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 let ret_val: uint32x2_t = transmute(vsli_n_s32::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] #[doc = "## Safety"] @@ -44018,7 +41929,6 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x static_assert!(N >= 0 && N <= 31); transmute(vsliq_n_s32::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] #[doc = "## Safety"] @@ -44036,7 +41946,6 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x let ret_val: uint32x4_t = transmute(vsliq_n_s32::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] #[doc = "## Safety"] @@ -44050,7 +41959,6 @@ pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 static_assert!(N >= 0 && N <= 63); transmute(vsli_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] #[doc = "## Safety"] @@ -44065,7 +41973,6 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x static_assert!(N >= 0 && N <= 63); transmute(vsliq_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] #[doc = "## Safety"] @@ -44083,7 +41990,6 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x let ret_val: uint64x2_t = transmute(vsliq_n_s64::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] #[doc = "## Safety"] @@ -44098,7 +42004,6 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert_uimm_bits!(N, 3); transmute(vsli_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] #[doc = "## Safety"] @@ -44116,7 +42021,6 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vsli_n_s8::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] #[doc = "## Safety"] @@ -44131,7 +42035,6 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 static_assert_uimm_bits!(N, 3); transmute(vsliq_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] #[doc = "## Safety"] @@ -44153,7 +42056,6 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] @@ -44168,7 +42070,6 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 static_assert_uimm_bits!(N, 4); transmute(vsli_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] @@ -44186,7 +42087,6 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 let ret_val: poly16x4_t = transmute(vsli_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] @@ -44201,7 +42101,6 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x static_assert_uimm_bits!(N, 4); transmute(vsliq_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] @@ -44219,7 +42118,6 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x let ret_val: poly16x8_t = transmute(vsliq_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] #[doc = "## Safety"] @@ -44233,7 +42131,6 @@ pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 static_assert!(N >= 0 && N <= 63); transmute(vsli_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] #[doc = "## Safety"] @@ -44248,7 +42145,6 @@ pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x static_assert!(N >= 0 && N <= 63); transmute(vsliq_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] #[doc = "## Safety"] @@ -44266,7 +42162,6 @@ pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x let ret_val: poly64x2_t = transmute(vsliq_n_s64::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"] #[doc = "## Safety"] @@ -44280,7 +42175,6 @@ pub unsafe fn vslid_n_s64(a: i64, b: i64) -> i64 { static_assert!(N >= 0 && N <= 63); transmute(vsli_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"] #[doc = "## Safety"] @@ -44294,7 +42188,6 @@ pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { static_assert!(N >= 0 && N <= 63); transmute(vsli_n_u64::(transmute(a), transmute(b))) } - #[doc = "SM3PARTW1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] #[doc = "## Safety"] @@ -44305,7 +42198,7 @@ pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(sm3partw1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw1" @@ -44314,7 +42207,6 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui } _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "SM3PARTW1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] #[doc = "## Safety"] @@ -44325,7 +42217,7 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(test, assert_instr(sm3partw1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw1" @@ -44339,7 +42231,6 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM3PARTW2"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] #[doc = "## Safety"] @@ -44350,7 +42241,7 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(test, assert_instr(sm3partw2))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw2" @@ -44359,7 +42250,6 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui } _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "SM3PARTW2"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] #[doc = "## Safety"] @@ -44370,7 +42260,7 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(test, assert_instr(sm3partw2))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw2" @@ -44384,7 +42274,6 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM3SS1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] #[doc = "## Safety"] @@ -44395,7 +42284,7 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(test, assert_instr(sm3ss1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3ss1" @@ -44404,7 +42293,6 @@ pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint3 } _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "SM3SS1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] #[doc = "## Safety"] @@ -44415,7 +42303,7 @@ pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint3 #[cfg_attr(test, assert_instr(sm3ss1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3ss1" @@ -44429,7 +42317,6 @@ pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint3 _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM3TT1A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] #[doc = "## Safety"] @@ -44446,7 +42333,7 @@ pub unsafe fn vsm3tt1aq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1a" @@ -44455,7 +42342,6 @@ pub unsafe fn vsm3tt1aq_u32( } _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } - #[doc = "SM3TT1A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] #[doc = "## Safety"] @@ -44472,7 +42358,7 @@ pub unsafe fn vsm3tt1aq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1a" @@ -44486,7 +42372,6 @@ pub unsafe fn vsm3tt1aq_u32( _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM3TT1B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] #[doc = "## Safety"] @@ -44503,7 +42388,7 @@ pub unsafe fn vsm3tt1bq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1b" @@ -44512,7 +42397,6 @@ pub unsafe fn vsm3tt1bq_u32( } _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } - #[doc = "SM3TT1B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] #[doc = "## Safety"] @@ -44529,7 +42413,7 @@ pub unsafe fn vsm3tt1bq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1b" @@ -44543,7 +42427,6 @@ pub unsafe fn vsm3tt1bq_u32( _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM3TT2A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] #[doc = "## Safety"] @@ -44560,7 +42443,7 @@ pub unsafe fn vsm3tt2aq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2a" @@ -44569,7 +42452,6 @@ pub unsafe fn vsm3tt2aq_u32( } _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } - #[doc = "SM3TT2A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] #[doc = "## Safety"] @@ -44586,7 +42468,7 @@ pub unsafe fn vsm3tt2aq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2a" @@ -44600,7 +42482,6 @@ pub unsafe fn vsm3tt2aq_u32( _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM3TT2B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] #[doc = "## Safety"] @@ -44617,7 +42498,7 @@ pub unsafe fn vsm3tt2bq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2b" @@ -44626,7 +42507,6 @@ pub unsafe fn vsm3tt2bq_u32( } _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } - #[doc = "SM3TT2B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] #[doc = "## Safety"] @@ -44643,7 +42523,7 @@ pub unsafe fn vsm3tt2bq_u32( c: uint32x4_t, ) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2b" @@ -44657,7 +42537,6 @@ pub unsafe fn vsm3tt2bq_u32( _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM4 key"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] #[doc = "## Safety"] @@ -44668,7 +42547,7 @@ pub unsafe fn vsm3tt2bq_u32( #[cfg_attr(test, assert_instr(sm4ekey))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4ekey" @@ -44677,7 +42556,6 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "SM4 key"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] #[doc = "## Safety"] @@ -44688,7 +42566,7 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(sm4ekey))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4ekey" @@ -44700,7 +42578,6 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SM4 encode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] #[doc = "## Safety"] @@ -44711,7 +42588,7 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(sm4e))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4e" @@ -44720,7 +42597,6 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "SM4 encode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] #[doc = "## Safety"] @@ -44731,7 +42607,7 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr(sm4e))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4e" @@ -44743,7 +42619,6 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] #[doc = "## Safety"] @@ -44754,7 +42629,7 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v8i8" @@ -44763,7 +42638,6 @@ pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } _vsqadd_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] #[doc = "## Safety"] @@ -44774,7 +42648,7 @@ pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v8i8" @@ -44786,7 +42660,6 @@ pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vsqadd_u8(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] #[doc = "## Safety"] @@ -44797,7 +42670,7 @@ pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v16i8" @@ -44806,7 +42679,6 @@ pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } _vsqaddq_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] #[doc = "## Safety"] @@ -44817,7 +42689,7 @@ pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v16i8" @@ -44833,7 +42705,6 @@ pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] #[doc = "## Safety"] @@ -44844,7 +42715,7 @@ pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v4i16" @@ -44853,7 +42724,6 @@ pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } _vsqadd_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] #[doc = "## Safety"] @@ -44864,7 +42734,7 @@ pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v4i16" @@ -44876,7 +42746,6 @@ pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vsqadd_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] #[doc = "## Safety"] @@ -44887,7 +42756,7 @@ pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v8i16" @@ -44896,7 +42765,6 @@ pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } _vsqaddq_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] #[doc = "## Safety"] @@ -44907,7 +42775,7 @@ pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v8i16" @@ -44919,7 +42787,6 @@ pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vsqaddq_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] #[doc = "## Safety"] @@ -44930,7 +42797,7 @@ pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v2i32" @@ -44939,7 +42806,6 @@ pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } _vsqadd_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] #[doc = "## Safety"] @@ -44950,7 +42816,7 @@ pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v2i32" @@ -44962,7 +42828,6 @@ pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vsqadd_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] #[doc = "## Safety"] @@ -44973,7 +42838,7 @@ pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v4i32" @@ -44982,7 +42847,6 @@ pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } _vsqaddq_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] #[doc = "## Safety"] @@ -44993,7 +42857,7 @@ pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v4i32" @@ -45005,7 +42869,6 @@ pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vsqaddq_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"] #[doc = "## Safety"] @@ -45015,7 +42878,7 @@ pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v1i64" @@ -45024,7 +42887,6 @@ pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } _vsqadd_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] #[doc = "## Safety"] @@ -45035,7 +42897,7 @@ pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v2i64" @@ -45044,7 +42906,6 @@ pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } _vsqaddq_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] #[doc = "## Safety"] @@ -45055,7 +42916,7 @@ pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v2i64" @@ -45067,7 +42928,6 @@ pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vsqaddq_u64(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"] #[doc = "## Safety"] @@ -45079,7 +42939,6 @@ pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) } - #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"] #[doc = "## Safety"] @@ -45091,7 +42950,6 @@ pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) } - #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"] #[doc = "## Safety"] @@ -45101,7 +42959,7 @@ pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { #[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i64" @@ -45110,7 +42968,6 @@ pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { } _vsqaddd_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"] #[doc = "## Safety"] @@ -45120,7 +42977,7 @@ pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { #[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i32" @@ -45129,7 +42986,6 @@ pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { } _vsqadds_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] #[doc = "## Safety"] @@ -45142,7 +42998,6 @@ pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { simd_fsqrt(a) } - #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] #[doc = "## Safety"] @@ -45157,7 +43012,6 @@ pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_fsqrt(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] #[doc = "## Safety"] @@ -45170,7 +43024,6 @@ pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { simd_fsqrt(a) } - #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] #[doc = "## Safety"] @@ -45185,7 +43038,6 @@ pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_fsqrt(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"] #[doc = "## Safety"] @@ -45197,7 +43049,6 @@ pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { simd_fsqrt(a) } - #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] #[doc = "## Safety"] @@ -45210,7 +43061,6 @@ pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { simd_fsqrt(a) } - #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] #[doc = "## Safety"] @@ -45225,7 +43075,6 @@ pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_fsqrt(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] @@ -45238,7 +43087,7 @@ pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v8i8" @@ -45247,7 +43096,6 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vsri_n_s8(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] @@ -45260,7 +43108,7 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v8i8" @@ -45272,7 +43120,6 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vsri_n_s8(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] @@ -45285,7 +43132,7 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v16i8" @@ -45294,7 +43141,6 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t } _vsriq_n_s8(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] @@ -45307,7 +43153,7 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v16i8" @@ -45323,7 +43169,6 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] @@ -45336,7 +43181,7 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v4i16" @@ -45345,7 +43190,6 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t } _vsri_n_s16(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] @@ -45358,7 +43202,7 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v4i16" @@ -45370,7 +43214,6 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t let ret_val: int16x4_t = _vsri_n_s16(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] @@ -45383,7 +43226,7 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v8i16" @@ -45392,7 +43235,6 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t } _vsriq_n_s16(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] @@ -45405,7 +43247,7 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v8i16" @@ -45417,7 +43259,6 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t let ret_val: int16x8_t = _vsriq_n_s16(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] @@ -45430,7 +43271,7 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v2i32" @@ -45439,7 +43280,6 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t } _vsri_n_s32(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] @@ -45452,7 +43292,7 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v2i32" @@ -45464,7 +43304,6 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t let ret_val: int32x2_t = _vsri_n_s32(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] @@ -45477,7 +43316,7 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v4i32" @@ -45486,7 +43325,6 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t } _vsriq_n_s32(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] @@ -45499,7 +43337,7 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v4i32" @@ -45511,7 +43349,6 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t let ret_val: int32x4_t = _vsriq_n_s32(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] #[doc = "## Safety"] @@ -45523,7 +43360,7 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v1i64" @@ -45532,7 +43369,6 @@ pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t } _vsri_n_s64(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[doc = "## Safety"] @@ -45545,7 +43381,7 @@ pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v2i64" @@ -45554,7 +43390,6 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t } _vsriq_n_s64(a, b, N) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[doc = "## Safety"] @@ -45567,7 +43402,7 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vsri.v2i64" @@ -45579,7 +43414,6 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t let ret_val: int64x2_t = _vsriq_n_s64(a, b, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] @@ -45594,7 +43428,6 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); transmute(vsri_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] @@ -45612,7 +43445,6 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vsri_n_s8::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] @@ -45627,7 +43459,6 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 static_assert!(N >= 1 && N <= 8); transmute(vsriq_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] @@ -45649,7 +43480,6 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] @@ -45664,7 +43494,6 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 static_assert!(N >= 1 && N <= 16); transmute(vsri_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] @@ -45682,7 +43511,6 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 let ret_val: uint16x4_t = transmute(vsri_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] @@ -45697,7 +43525,6 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x static_assert!(N >= 1 && N <= 16); transmute(vsriq_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] @@ -45715,7 +43542,6 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x let ret_val: uint16x8_t = transmute(vsriq_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] @@ -45730,7 +43556,6 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 static_assert!(N >= 1 && N <= 32); transmute(vsri_n_s32::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] @@ -45748,7 +43573,6 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 let ret_val: uint32x2_t = transmute(vsri_n_s32::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] @@ -45763,7 +43587,6 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x static_assert!(N >= 1 && N <= 32); transmute(vsriq_n_s32::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] @@ -45781,7 +43604,6 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x let ret_val: uint32x4_t = transmute(vsriq_n_s32::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] #[doc = "## Safety"] @@ -45795,7 +43617,6 @@ pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 static_assert!(N >= 1 && N <= 64); transmute(vsri_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] @@ -45810,7 +43631,6 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x static_assert!(N >= 1 && N <= 64); transmute(vsriq_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] @@ -45828,7 +43648,6 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x let ret_val: uint64x2_t = transmute(vsriq_n_s64::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] @@ -45843,7 +43662,6 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert!(N >= 1 && N <= 8); transmute(vsri_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] @@ -45861,7 +43679,6 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vsri_n_s8::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] @@ -45876,7 +43693,6 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 static_assert!(N >= 1 && N <= 8); transmute(vsriq_n_s8::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] @@ -45898,7 +43714,6 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] @@ -45913,7 +43728,6 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 static_assert!(N >= 1 && N <= 16); transmute(vsri_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] @@ -45931,7 +43745,6 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 let ret_val: poly16x4_t = transmute(vsri_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] @@ -45946,7 +43759,6 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x static_assert!(N >= 1 && N <= 16); transmute(vsriq_n_s16::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] @@ -45964,7 +43776,6 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x let ret_val: poly16x8_t = transmute(vsriq_n_s16::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] #[doc = "## Safety"] @@ -45978,7 +43789,6 @@ pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 static_assert!(N >= 1 && N <= 64); transmute(vsri_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] #[doc = "## Safety"] @@ -45993,7 +43803,6 @@ pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x static_assert!(N >= 1 && N <= 64); transmute(vsriq_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] #[doc = "## Safety"] @@ -46011,7 +43820,6 @@ pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x let ret_val: poly64x2_t = transmute(vsriq_n_s64::(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"] #[doc = "## Safety"] @@ -46025,7 +43833,6 @@ pub unsafe fn vsrid_n_s64(a: i64, b: i64) -> i64 { static_assert!(N >= 1 && N <= 64); transmute(vsri_n_s64::(transmute(a), transmute(b))) } - #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"] #[doc = "## Safety"] @@ -46039,7 +43846,6 @@ pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 { static_assert!(N >= 1 && N <= 64); transmute(vsri_n_u64::(transmute(a), transmute(b))) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] @@ -46053,7 +43859,6 @@ pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 { pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] @@ -46068,7 +43873,6 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] @@ -46082,7 +43886,6 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] @@ -46097,7 +43900,6 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"] #[doc = "## Safety"] @@ -46110,7 +43912,6 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] #[doc = "## Safety"] @@ -46124,7 +43925,6 @@ pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] #[doc = "## Safety"] @@ -46139,7 +43939,6 @@ pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] @@ -46153,7 +43952,6 @@ pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] @@ -46168,7 +43966,6 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] @@ -46182,7 +43979,6 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] @@ -46197,7 +43993,6 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] @@ -46211,7 +44006,6 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] @@ -46226,7 +44020,6 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] @@ -46240,7 +44033,6 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] @@ -46255,7 +44047,6 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] @@ -46269,7 +44060,6 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] @@ -46284,7 +44074,6 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] @@ -46298,7 +44087,6 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] @@ -46313,7 +44101,6 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] #[doc = "## Safety"] @@ -46326,7 +44113,6 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] @@ -46340,7 +44126,6 @@ pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] @@ -46355,7 +44140,6 @@ pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] @@ -46369,7 +44153,6 @@ pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] @@ -46384,7 +44167,6 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] @@ -46398,7 +44180,6 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] @@ -46413,7 +44194,6 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] @@ -46427,7 +44207,6 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] @@ -46442,7 +44221,6 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] @@ -46456,7 +44234,6 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] @@ -46471,7 +44248,6 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] @@ -46485,7 +44261,6 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] @@ -46500,7 +44275,6 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] @@ -46514,7 +44288,6 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] @@ -46529,7 +44302,6 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] #[doc = "## Safety"] @@ -46542,7 +44314,6 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] @@ -46556,7 +44327,6 @@ pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] @@ -46571,7 +44341,6 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] @@ -46585,7 +44354,6 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] @@ -46600,7 +44368,6 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] @@ -46614,7 +44381,6 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] @@ -46629,7 +44395,6 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] @@ -46643,7 +44408,6 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] @@ -46658,7 +44422,6 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] @@ -46672,7 +44435,6 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] @@ -46687,7 +44449,6 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] #[doc = "## Safety"] @@ -46700,7 +44461,6 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] @@ -46714,7 +44474,6 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] @@ -46729,7 +44488,6 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); crate::ptr::write_unaligned(ptr.cast(), a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"] #[doc = "## Safety"] @@ -46739,7 +44497,7 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64" @@ -46748,7 +44506,6 @@ pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { } _vst1_f64_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] #[doc = "## Safety"] @@ -46759,7 +44516,7 @@ pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" @@ -46768,7 +44525,6 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { } _vst1q_f64_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] #[doc = "## Safety"] @@ -46779,7 +44535,7 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" @@ -46791,7 +44547,6 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst1q_f64_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"] #[doc = "## Safety"] @@ -46801,7 +44556,7 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64" @@ -46810,7 +44565,6 @@ pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { } _vst1_f64_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] #[doc = "## Safety"] @@ -46821,7 +44575,7 @@ pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" @@ -46830,7 +44584,6 @@ pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { } _vst1q_f64_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] #[doc = "## Safety"] @@ -46841,7 +44594,7 @@ pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" @@ -46854,7 +44607,6 @@ pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst1q_f64_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"] #[doc = "## Safety"] @@ -46864,7 +44616,7 @@ pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64" @@ -46879,7 +44631,6 @@ pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { } _vst1_f64_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] #[doc = "## Safety"] @@ -46890,7 +44641,7 @@ pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" @@ -46905,7 +44656,6 @@ pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { } _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] #[doc = "## Safety"] @@ -46916,7 +44666,7 @@ pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" @@ -46936,7 +44686,6 @@ pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"] #[doc = "## Safety"] @@ -46950,7 +44699,6 @@ pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { static_assert!(LANE == 0); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] #[doc = "## Safety"] @@ -46965,7 +44713,6 @@ pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] #[doc = "## Safety"] @@ -46981,7 +44728,6 @@ pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"] #[doc = "## Safety"] @@ -46991,7 +44737,7 @@ pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v1f64.p0i8" @@ -47000,7 +44746,6 @@ pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { } _vst2_f64(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"] #[doc = "## Safety"] @@ -47012,7 +44757,7 @@ pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8" @@ -47021,7 +44766,6 @@ pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { } _vst2_lane_f64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"] #[doc = "## Safety"] @@ -47033,7 +44777,7 @@ pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8" @@ -47042,7 +44786,6 @@ pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { } _vst2_lane_s64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"] #[doc = "## Safety"] @@ -47056,7 +44799,6 @@ pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { static_assert!(LANE == 0); vst2_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"] #[doc = "## Safety"] @@ -47070,7 +44812,6 @@ pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { static_assert!(LANE == 0); vst2_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] #[doc = "## Safety"] @@ -47081,7 +44822,7 @@ pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" @@ -47090,7 +44831,6 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { } _vst2q_f64(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] #[doc = "## Safety"] @@ -47101,7 +44841,7 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" @@ -47113,7 +44853,6 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2q_f64(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] #[doc = "## Safety"] @@ -47124,7 +44863,7 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" @@ -47133,7 +44872,6 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { } _vst2q_s64(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] #[doc = "## Safety"] @@ -47144,7 +44882,7 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" @@ -47156,7 +44894,6 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2q_s64(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] #[doc = "## Safety"] @@ -47169,7 +44906,7 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" @@ -47178,7 +44915,6 @@ pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { } _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] #[doc = "## Safety"] @@ -47191,7 +44927,7 @@ pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" @@ -47203,7 +44939,6 @@ pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] #[doc = "## Safety"] @@ -47216,7 +44951,7 @@ pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" @@ -47225,7 +44960,6 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { } _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] #[doc = "## Safety"] @@ -47238,7 +44972,7 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" @@ -47258,7 +44992,6 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { ); _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] #[doc = "## Safety"] @@ -47271,7 +45004,7 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" @@ -47280,7 +45013,6 @@ pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { } _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] #[doc = "## Safety"] @@ -47293,7 +45025,7 @@ pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" @@ -47305,7 +45037,6 @@ pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] #[doc = "## Safety"] @@ -47320,7 +45051,6 @@ pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { static_assert_uimm_bits!(LANE, 1); vst2q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] #[doc = "## Safety"] @@ -47338,7 +45068,6 @@ pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst2q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] #[doc = "## Safety"] @@ -47353,7 +45082,6 @@ pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { static_assert_uimm_bits!(LANE, 4); vst2q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] #[doc = "## Safety"] @@ -47379,7 +45107,6 @@ pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { ); vst2q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] #[doc = "## Safety"] @@ -47394,7 +45121,6 @@ pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { static_assert_uimm_bits!(LANE, 1); vst2q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] #[doc = "## Safety"] @@ -47412,7 +45138,6 @@ pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst2q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] #[doc = "## Safety"] @@ -47427,7 +45152,6 @@ pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { static_assert_uimm_bits!(LANE, 4); vst2q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] #[doc = "## Safety"] @@ -47453,7 +45177,6 @@ pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { ); vst2q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] #[doc = "## Safety"] @@ -47466,7 +45189,6 @@ pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { vst2q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] #[doc = "## Safety"] @@ -47482,7 +45204,6 @@ pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst2q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] #[doc = "## Safety"] @@ -47495,7 +45216,6 @@ pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { vst2q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] #[doc = "## Safety"] @@ -47511,7 +45231,6 @@ pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst2q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"] #[doc = "## Safety"] @@ -47521,7 +45240,7 @@ pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v1f64.p0i8" @@ -47530,7 +45249,6 @@ pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { } _vst3_f64(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"] #[doc = "## Safety"] @@ -47542,7 +45260,7 @@ pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8" @@ -47551,7 +45269,6 @@ pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { } _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"] #[doc = "## Safety"] @@ -47563,7 +45280,7 @@ pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8" @@ -47572,7 +45289,6 @@ pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { } _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"] #[doc = "## Safety"] @@ -47586,7 +45302,6 @@ pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { static_assert!(LANE == 0); vst3_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"] #[doc = "## Safety"] @@ -47600,7 +45315,6 @@ pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { static_assert!(LANE == 0); vst3_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] #[doc = "## Safety"] @@ -47611,7 +45325,7 @@ pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" @@ -47620,7 +45334,6 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { } _vst3q_f64(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] #[doc = "## Safety"] @@ -47631,7 +45344,7 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" @@ -47644,7 +45357,6 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3q_f64(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] #[doc = "## Safety"] @@ -47655,7 +45367,7 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" @@ -47664,7 +45376,6 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { } _vst3q_s64(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] #[doc = "## Safety"] @@ -47675,7 +45386,7 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" @@ -47688,7 +45399,6 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3q_s64(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] #[doc = "## Safety"] @@ -47701,7 +45411,7 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" @@ -47710,7 +45420,6 @@ pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { } _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] #[doc = "## Safety"] @@ -47723,7 +45432,7 @@ pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" @@ -47736,7 +45445,6 @@ pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] #[doc = "## Safety"] @@ -47749,7 +45457,7 @@ pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" @@ -47758,7 +45466,6 @@ pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { } _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] #[doc = "## Safety"] @@ -47771,7 +45478,7 @@ pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" @@ -47796,7 +45503,6 @@ pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { ); _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] #[doc = "## Safety"] @@ -47809,7 +45515,7 @@ pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" @@ -47818,7 +45524,6 @@ pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { } _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] #[doc = "## Safety"] @@ -47831,7 +45536,7 @@ pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" @@ -47844,7 +45549,6 @@ pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] #[doc = "## Safety"] @@ -47859,7 +45563,6 @@ pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { static_assert_uimm_bits!(LANE, 1); vst3q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] #[doc = "## Safety"] @@ -47878,7 +45581,6 @@ pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst3q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] #[doc = "## Safety"] @@ -47893,7 +45595,6 @@ pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { static_assert_uimm_bits!(LANE, 4); vst3q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] #[doc = "## Safety"] @@ -47924,7 +45625,6 @@ pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { ); vst3q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] #[doc = "## Safety"] @@ -47939,7 +45639,6 @@ pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { static_assert_uimm_bits!(LANE, 1); vst3q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] #[doc = "## Safety"] @@ -47958,7 +45657,6 @@ pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst3q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] #[doc = "## Safety"] @@ -47973,7 +45671,6 @@ pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { static_assert_uimm_bits!(LANE, 4); vst3q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] #[doc = "## Safety"] @@ -48004,7 +45701,6 @@ pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { ); vst3q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] #[doc = "## Safety"] @@ -48017,7 +45713,6 @@ pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { vst3q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] #[doc = "## Safety"] @@ -48034,7 +45729,6 @@ pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst3q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] #[doc = "## Safety"] @@ -48047,7 +45741,6 @@ pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { vst3q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] #[doc = "## Safety"] @@ -48064,7 +45757,6 @@ pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst3q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"] #[doc = "## Safety"] @@ -48074,7 +45766,7 @@ pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v1f64.p0i8" @@ -48083,7 +45775,6 @@ pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { } _vst4_f64(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"] #[doc = "## Safety"] @@ -48095,7 +45786,7 @@ pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8" @@ -48111,7 +45802,6 @@ pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { } _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"] #[doc = "## Safety"] @@ -48123,7 +45813,7 @@ pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { static_assert!(LANE == 0); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8" @@ -48139,7 +45829,6 @@ pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { } _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"] #[doc = "## Safety"] @@ -48153,7 +45842,6 @@ pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { static_assert!(LANE == 0); vst4_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"] #[doc = "## Safety"] @@ -48167,7 +45855,6 @@ pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { static_assert!(LANE == 0); vst4_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] #[doc = "## Safety"] @@ -48178,7 +45865,7 @@ pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" @@ -48187,7 +45874,6 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { } _vst4q_f64(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] #[doc = "## Safety"] @@ -48198,7 +45884,7 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" @@ -48212,7 +45898,6 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4q_f64(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] #[doc = "## Safety"] @@ -48223,7 +45908,7 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" @@ -48232,7 +45917,6 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { } _vst4q_s64(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] #[doc = "## Safety"] @@ -48243,7 +45927,7 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" @@ -48257,7 +45941,6 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4q_s64(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] #[doc = "## Safety"] @@ -48270,7 +45953,7 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" @@ -48286,7 +45969,6 @@ pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { } _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] #[doc = "## Safety"] @@ -48299,7 +45981,7 @@ pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" @@ -48320,7 +46002,6 @@ pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] #[doc = "## Safety"] @@ -48333,7 +46014,7 @@ pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" @@ -48349,7 +46030,6 @@ pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { } _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] #[doc = "## Safety"] @@ -48362,7 +46042,7 @@ pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" @@ -48399,7 +46079,6 @@ pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { ); _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] #[doc = "## Safety"] @@ -48412,7 +46091,7 @@ pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" @@ -48428,7 +46107,6 @@ pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { } _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] #[doc = "## Safety"] @@ -48441,7 +46119,7 @@ pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" @@ -48462,7 +46140,6 @@ pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] #[doc = "## Safety"] @@ -48477,7 +46154,6 @@ pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { static_assert_uimm_bits!(LANE, 1); vst4q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] #[doc = "## Safety"] @@ -48497,7 +46173,6 @@ pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst4q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] #[doc = "## Safety"] @@ -48512,7 +46187,6 @@ pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { static_assert_uimm_bits!(LANE, 4); vst4q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] #[doc = "## Safety"] @@ -48548,7 +46222,6 @@ pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { ); vst4q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] #[doc = "## Safety"] @@ -48563,7 +46236,6 @@ pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { static_assert_uimm_bits!(LANE, 1); vst4q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] #[doc = "## Safety"] @@ -48583,7 +46255,6 @@ pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst4q_lane_s64::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] #[doc = "## Safety"] @@ -48598,7 +46269,6 @@ pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { static_assert_uimm_bits!(LANE, 4); vst4q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] #[doc = "## Safety"] @@ -48634,7 +46304,6 @@ pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { ); vst4q_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] #[doc = "## Safety"] @@ -48647,7 +46316,6 @@ pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { vst4q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] #[doc = "## Safety"] @@ -48665,7 +46333,6 @@ pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst4q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] #[doc = "## Safety"] @@ -48678,7 +46345,6 @@ pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { vst4q_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] #[doc = "## Safety"] @@ -48696,7 +46362,6 @@ pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst4q_s64(transmute(a), transmute(b)) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] #[doc = "## Safety"] @@ -48708,7 +46373,6 @@ pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] #[doc = "## Safety"] @@ -48721,7 +46385,6 @@ pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] #[doc = "## Safety"] @@ -48737,7 +46400,6 @@ pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"] #[doc = "## Safety"] @@ -48749,7 +46411,6 @@ pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { a.wrapping_sub(b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"] #[doc = "## Safety"] @@ -48761,7 +46422,6 @@ pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 { a.wrapping_sub(b) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] #[doc = "## Safety"] @@ -48778,7 +46438,6 @@ pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let f: int16x8_t = simd_cast(e); simd_sub(d, f) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] #[doc = "## Safety"] @@ -48798,7 +46457,6 @@ pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = simd_sub(d, f); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] #[doc = "## Safety"] @@ -48815,7 +46473,6 @@ pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let f: int32x4_t = simd_cast(e); simd_sub(d, f) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] #[doc = "## Safety"] @@ -48835,7 +46492,6 @@ pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = simd_sub(d, f); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] #[doc = "## Safety"] @@ -48852,7 +46508,6 @@ pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let f: int64x2_t = simd_cast(e); simd_sub(d, f) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] #[doc = "## Safety"] @@ -48872,7 +46527,6 @@ pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = simd_sub(d, f); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] #[doc = "## Safety"] @@ -48889,7 +46543,6 @@ pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let f: uint16x8_t = simd_cast(e); simd_sub(d, f) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] #[doc = "## Safety"] @@ -48909,7 +46562,6 @@ pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_sub(d, f); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] #[doc = "## Safety"] @@ -48926,7 +46578,6 @@ pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let f: uint32x4_t = simd_cast(e); simd_sub(d, f) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] #[doc = "## Safety"] @@ -48946,7 +46597,6 @@ pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_sub(d, f); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] #[doc = "## Safety"] @@ -48963,7 +46613,6 @@ pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let f: uint64x2_t = simd_cast(e); simd_sub(d, f) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] #[doc = "## Safety"] @@ -48983,7 +46632,6 @@ pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_sub(d, f); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] #[doc = "## Safety"] @@ -48997,7 +46645,6 @@ pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); simd_sub(a, simd_cast(c)) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] #[doc = "## Safety"] @@ -49014,7 +46661,6 @@ pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = simd_sub(a, simd_cast(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] #[doc = "## Safety"] @@ -49028,7 +46674,6 @@ pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); simd_sub(a, simd_cast(c)) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] #[doc = "## Safety"] @@ -49045,7 +46690,6 @@ pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = simd_sub(a, simd_cast(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] #[doc = "## Safety"] @@ -49059,7 +46703,6 @@ pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); simd_sub(a, simd_cast(c)) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] #[doc = "## Safety"] @@ -49076,7 +46719,6 @@ pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = simd_sub(a, simd_cast(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] #[doc = "## Safety"] @@ -49090,7 +46732,6 @@ pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); simd_sub(a, simd_cast(c)) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] #[doc = "## Safety"] @@ -49107,7 +46748,6 @@ pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_sub(a, simd_cast(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] #[doc = "## Safety"] @@ -49121,7 +46761,6 @@ pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); simd_sub(a, simd_cast(c)) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] #[doc = "## Safety"] @@ -49138,7 +46777,6 @@ pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_sub(a, simd_cast(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] #[doc = "## Safety"] @@ -49152,7 +46790,6 @@ pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); simd_sub(a, simd_cast(c)) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] #[doc = "## Safety"] @@ -49169,7 +46806,6 @@ pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_sub(a, simd_cast(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] #[doc = "## Safety"] @@ -49190,7 +46826,6 @@ pub unsafe fn vsudot_laneq_s32( let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vusdot_s32(a, transmute(c), b) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] #[doc = "## Safety"] @@ -49215,7 +46850,6 @@ pub unsafe fn vsudot_laneq_s32( let ret_val: int32x2_t = vusdot_s32(a, transmute(c), b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] #[doc = "## Safety"] @@ -49236,7 +46870,6 @@ pub unsafe fn vsudotq_laneq_s32( let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vusdotq_s32(a, transmute(c), b) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] #[doc = "## Safety"] @@ -49261,7 +46894,6 @@ pub unsafe fn vsudotq_laneq_s32( let ret_val: int32x4_t = vusdotq_s32(a, transmute(c), b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[doc = "## Safety"] @@ -49274,7 +46906,6 @@ pub unsafe fn vsudotq_laneq_s32( pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vqtbl1_s8(vcombine_s8(a, crate::mem::zeroed()), transmute(b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[doc = "## Safety"] @@ -49290,7 +46921,6 @@ pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbl1_s8(vcombine_s8(a, crate::mem::zeroed()), transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[doc = "## Safety"] @@ -49303,7 +46933,6 @@ pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { vqtbl1_u8(vcombine_u8(a, crate::mem::zeroed()), b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[doc = "## Safety"] @@ -49319,7 +46948,6 @@ pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = vqtbl1_u8(vcombine_u8(a, crate::mem::zeroed()), b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[doc = "## Safety"] @@ -49332,7 +46960,6 @@ pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { vqtbl1_p8(vcombine_p8(a, crate::mem::zeroed()), b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[doc = "## Safety"] @@ -49348,7 +46975,6 @@ pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = vqtbl1_p8(vcombine_p8(a, crate::mem::zeroed()), b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[doc = "## Safety"] @@ -49361,7 +46987,6 @@ pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[doc = "## Safety"] @@ -49379,7 +47004,6 @@ pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] @@ -49392,7 +47016,6 @@ pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] @@ -49410,7 +47033,6 @@ pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] @@ -49423,7 +47045,6 @@ pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] @@ -49441,7 +47062,6 @@ pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] @@ -49458,7 +47078,6 @@ pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { ); transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] @@ -49481,7 +47100,6 @@ pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] @@ -49498,7 +47116,6 @@ pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { ); transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] @@ -49521,7 +47138,6 @@ pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] @@ -49538,7 +47154,6 @@ pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { ); transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] @@ -49561,7 +47176,6 @@ pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] @@ -49575,7 +47189,6 @@ pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)); transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] @@ -49596,7 +47209,6 @@ pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] @@ -49610,7 +47222,6 @@ pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] @@ -49631,7 +47242,6 @@ pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] @@ -49645,7 +47255,6 @@ pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] @@ -49666,7 +47275,6 @@ pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] @@ -49687,7 +47295,6 @@ pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { a, ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] @@ -49712,7 +47319,6 @@ pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] @@ -49733,7 +47339,6 @@ pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { a, ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] @@ -49758,7 +47363,6 @@ pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] @@ -49779,7 +47383,6 @@ pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { a, ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] @@ -49804,7 +47407,6 @@ pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] @@ -49817,7 +47419,6 @@ pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] @@ -49836,7 +47437,6 @@ pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] @@ -49849,7 +47449,6 @@ pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] @@ -49868,7 +47467,6 @@ pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t let ret_val: uint8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] @@ -49881,7 +47479,6 @@ pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] @@ -49900,7 +47497,6 @@ pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t let ret_val: poly8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] @@ -49926,7 +47522,6 @@ pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { a, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] @@ -49959,7 +47554,6 @@ pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] @@ -49980,7 +47574,6 @@ pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t a, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] @@ -50008,7 +47601,6 @@ pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] @@ -50029,7 +47621,6 @@ pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t a, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] @@ -50057,7 +47648,6 @@ pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] @@ -50075,7 +47665,6 @@ pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { transmute(c), ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] @@ -50101,7 +47690,6 @@ pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] @@ -50119,7 +47707,6 @@ pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] @@ -50145,7 +47732,6 @@ pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] @@ -50163,7 +47749,6 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t c, )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] @@ -50189,7 +47774,6 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] #[doc = "## Safety"] @@ -50202,7 +47786,6 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] #[doc = "## Safety"] @@ -50218,7 +47801,6 @@ pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] #[doc = "## Safety"] @@ -50231,7 +47813,6 @@ pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] #[doc = "## Safety"] @@ -50247,7 +47828,6 @@ pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] #[doc = "## Safety"] @@ -50260,7 +47840,6 @@ pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] #[doc = "## Safety"] @@ -50276,7 +47855,6 @@ pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] #[doc = "## Safety"] @@ -50289,7 +47867,6 @@ pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] #[doc = "## Safety"] @@ -50305,7 +47882,6 @@ pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] #[doc = "## Safety"] @@ -50318,7 +47894,6 @@ pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] #[doc = "## Safety"] @@ -50334,7 +47909,6 @@ pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] #[doc = "## Safety"] @@ -50347,7 +47921,6 @@ pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] #[doc = "## Safety"] @@ -50363,7 +47936,6 @@ pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] #[doc = "## Safety"] @@ -50376,7 +47948,6 @@ pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] #[doc = "## Safety"] @@ -50392,7 +47963,6 @@ pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] #[doc = "## Safety"] @@ -50405,7 +47975,6 @@ pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle!(a, b, [0, 4, 2, 6]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] #[doc = "## Safety"] @@ -50421,7 +47990,6 @@ pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] #[doc = "## Safety"] @@ -50434,7 +48002,6 @@ pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] #[doc = "## Safety"] @@ -50450,7 +48017,6 @@ pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] #[doc = "## Safety"] @@ -50467,7 +48033,6 @@ pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] #[doc = "## Safety"] @@ -50491,7 +48056,6 @@ pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] #[doc = "## Safety"] @@ -50504,7 +48068,6 @@ pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle!(a, b, [0, 4, 2, 6]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] #[doc = "## Safety"] @@ -50520,7 +48083,6 @@ pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] #[doc = "## Safety"] @@ -50533,7 +48095,6 @@ pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] #[doc = "## Safety"] @@ -50549,7 +48110,6 @@ pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] #[doc = "## Safety"] @@ -50562,7 +48122,6 @@ pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle!(a, b, [0, 4, 2, 6]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] #[doc = "## Safety"] @@ -50578,7 +48137,6 @@ pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] #[doc = "## Safety"] @@ -50591,7 +48149,6 @@ pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] #[doc = "## Safety"] @@ -50607,7 +48164,6 @@ pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] #[doc = "## Safety"] @@ -50624,7 +48180,6 @@ pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] #[doc = "## Safety"] @@ -50648,7 +48203,6 @@ pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] #[doc = "## Safety"] @@ -50661,7 +48215,6 @@ pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [0, 4, 2, 6]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] #[doc = "## Safety"] @@ -50677,7 +48230,6 @@ pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] #[doc = "## Safety"] @@ -50690,7 +48242,6 @@ pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] #[doc = "## Safety"] @@ -50706,7 +48257,6 @@ pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] #[doc = "## Safety"] @@ -50719,7 +48269,6 @@ pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [0, 4, 2, 6]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] #[doc = "## Safety"] @@ -50735,7 +48284,6 @@ pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] #[doc = "## Safety"] @@ -50748,7 +48296,6 @@ pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] #[doc = "## Safety"] @@ -50764,7 +48311,6 @@ pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] #[doc = "## Safety"] @@ -50781,7 +48327,6 @@ pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] #[doc = "## Safety"] @@ -50805,7 +48350,6 @@ pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] #[doc = "## Safety"] @@ -50818,7 +48362,6 @@ pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [0, 4, 2, 6]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] #[doc = "## Safety"] @@ -50834,7 +48377,6 @@ pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] #[doc = "## Safety"] @@ -50847,7 +48389,6 @@ pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] #[doc = "## Safety"] @@ -50863,7 +48404,6 @@ pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] #[doc = "## Safety"] @@ -50876,7 +48416,6 @@ pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] #[doc = "## Safety"] @@ -50892,7 +48431,6 @@ pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] #[doc = "## Safety"] @@ -50905,7 +48443,6 @@ pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] #[doc = "## Safety"] @@ -50921,7 +48458,6 @@ pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] #[doc = "## Safety"] @@ -50934,7 +48470,6 @@ pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] #[doc = "## Safety"] @@ -50950,7 +48485,6 @@ pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] #[doc = "## Safety"] @@ -50963,7 +48497,6 @@ pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] #[doc = "## Safety"] @@ -50979,7 +48512,6 @@ pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] #[doc = "## Safety"] @@ -50992,7 +48524,6 @@ pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] #[doc = "## Safety"] @@ -51008,7 +48539,6 @@ pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] #[doc = "## Safety"] @@ -51021,7 +48551,6 @@ pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] #[doc = "## Safety"] @@ -51037,7 +48566,6 @@ pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] #[doc = "## Safety"] @@ -51050,7 +48578,6 @@ pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] #[doc = "## Safety"] @@ -51066,7 +48593,6 @@ pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] #[doc = "## Safety"] @@ -51079,7 +48605,6 @@ pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle!(a, b, [1, 5, 3, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] #[doc = "## Safety"] @@ -51095,7 +48620,6 @@ pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] #[doc = "## Safety"] @@ -51108,7 +48632,6 @@ pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] #[doc = "## Safety"] @@ -51124,7 +48647,6 @@ pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] #[doc = "## Safety"] @@ -51141,7 +48663,6 @@ pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] #[doc = "## Safety"] @@ -51165,7 +48686,6 @@ pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] #[doc = "## Safety"] @@ -51178,7 +48698,6 @@ pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle!(a, b, [1, 5, 3, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] #[doc = "## Safety"] @@ -51194,7 +48713,6 @@ pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] #[doc = "## Safety"] @@ -51207,7 +48725,6 @@ pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] #[doc = "## Safety"] @@ -51223,7 +48740,6 @@ pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] #[doc = "## Safety"] @@ -51236,7 +48752,6 @@ pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle!(a, b, [1, 5, 3, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] #[doc = "## Safety"] @@ -51252,7 +48767,6 @@ pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] #[doc = "## Safety"] @@ -51265,7 +48779,6 @@ pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] #[doc = "## Safety"] @@ -51281,7 +48794,6 @@ pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] #[doc = "## Safety"] @@ -51298,7 +48810,6 @@ pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] #[doc = "## Safety"] @@ -51322,7 +48833,6 @@ pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] #[doc = "## Safety"] @@ -51335,7 +48845,6 @@ pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [1, 5, 3, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] #[doc = "## Safety"] @@ -51351,7 +48860,6 @@ pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] #[doc = "## Safety"] @@ -51364,7 +48872,6 @@ pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] #[doc = "## Safety"] @@ -51380,7 +48887,6 @@ pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] #[doc = "## Safety"] @@ -51393,7 +48899,6 @@ pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [1, 5, 3, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] #[doc = "## Safety"] @@ -51409,7 +48914,6 @@ pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] #[doc = "## Safety"] @@ -51422,7 +48926,6 @@ pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] #[doc = "## Safety"] @@ -51438,7 +48941,6 @@ pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] #[doc = "## Safety"] @@ -51455,7 +48957,6 @@ pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] #[doc = "## Safety"] @@ -51479,7 +48980,6 @@ pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] #[doc = "## Safety"] @@ -51492,7 +48992,6 @@ pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [1, 5, 3, 7]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] #[doc = "## Safety"] @@ -51508,7 +49007,6 @@ pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] #[doc = "## Safety"] @@ -51521,7 +49019,6 @@ pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } - #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] #[doc = "## Safety"] @@ -51537,7 +49034,6 @@ pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] #[doc = "## Safety"] @@ -51551,7 +49047,6 @@ pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { let d: i64x1 = i64x1::new(0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] #[doc = "## Safety"] @@ -51566,7 +49061,6 @@ pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { let d: i64x2 = i64x2::new(0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] #[doc = "## Safety"] @@ -51584,7 +49078,6 @@ pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"] #[doc = "## Safety"] @@ -51598,7 +49091,6 @@ pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { let d: i64x1 = i64x1::new(0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] #[doc = "## Safety"] @@ -51613,7 +49105,6 @@ pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { let d: i64x2 = i64x2::new(0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] #[doc = "## Safety"] @@ -51631,7 +49122,6 @@ pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"] #[doc = "## Safety"] @@ -51645,7 +49135,6 @@ pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { let d: u64x1 = u64x1::new(0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] #[doc = "## Safety"] @@ -51660,7 +49149,6 @@ pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let d: u64x2 = u64x2::new(0, 0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] #[doc = "## Safety"] @@ -51678,7 +49166,6 @@ pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare bitwise test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"] #[doc = "## Safety"] @@ -51690,7 +49177,6 @@ pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 { transmute(vtst_s64(transmute(a), transmute(b))) } - #[doc = "Compare bitwise test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"] #[doc = "## Safety"] @@ -51702,7 +49188,6 @@ pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 { pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 { transmute(vtst_u64(transmute(a), transmute(b))) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] #[doc = "## Safety"] @@ -51713,7 +49198,7 @@ pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v8i8" @@ -51722,7 +49207,6 @@ pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { } _vuqadd_s8(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] #[doc = "## Safety"] @@ -51733,7 +49217,7 @@ pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v8i8" @@ -51745,7 +49229,6 @@ pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vuqadd_s8(a, b.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] #[doc = "## Safety"] @@ -51756,7 +49239,7 @@ pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v16i8" @@ -51765,7 +49248,6 @@ pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { } _vuqaddq_s8(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] #[doc = "## Safety"] @@ -51776,7 +49258,7 @@ pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v16i8" @@ -51792,7 +49274,6 @@ pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] #[doc = "## Safety"] @@ -51803,7 +49284,7 @@ pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v4i16" @@ -51812,7 +49293,6 @@ pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { } _vuqadd_s16(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] #[doc = "## Safety"] @@ -51823,7 +49303,7 @@ pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v4i16" @@ -51835,7 +49315,6 @@ pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vuqadd_s16(a, b.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] #[doc = "## Safety"] @@ -51846,7 +49325,7 @@ pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v8i16" @@ -51855,7 +49334,6 @@ pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { } _vuqaddq_s16(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] #[doc = "## Safety"] @@ -51866,7 +49344,7 @@ pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v8i16" @@ -51878,7 +49356,6 @@ pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vuqaddq_s16(a, b.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] #[doc = "## Safety"] @@ -51889,7 +49366,7 @@ pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v2i32" @@ -51898,7 +49375,6 @@ pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { } _vuqadd_s32(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] #[doc = "## Safety"] @@ -51909,7 +49385,7 @@ pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v2i32" @@ -51921,7 +49397,6 @@ pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vuqadd_s32(a, b.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] #[doc = "## Safety"] @@ -51932,7 +49407,7 @@ pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v4i32" @@ -51941,7 +49416,6 @@ pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { } _vuqaddq_s32(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] #[doc = "## Safety"] @@ -51952,7 +49426,7 @@ pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v4i32" @@ -51964,7 +49438,6 @@ pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vuqaddq_s32(a, b.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"] #[doc = "## Safety"] @@ -51974,7 +49447,7 @@ pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v1i64" @@ -51983,7 +49456,6 @@ pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { } _vuqadd_s64(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] #[doc = "## Safety"] @@ -51994,7 +49466,7 @@ pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v2i64" @@ -52003,7 +49475,6 @@ pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { } _vuqaddq_s64(a, b.as_signed()) } - #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] #[doc = "## Safety"] @@ -52014,7 +49485,7 @@ pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v2i64" @@ -52026,7 +49497,6 @@ pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vuqaddq_s64(a, b.as_signed()); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] #[doc = "## Safety"] @@ -52038,7 +49508,6 @@ pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) } - #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] #[doc = "## Safety"] @@ -52050,7 +49519,6 @@ pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 { pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) } - #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] #[doc = "## Safety"] @@ -52060,7 +49528,7 @@ pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 { #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i64" @@ -52069,7 +49537,6 @@ pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { } _vuqaddd_s64(a, b.as_signed()) } - #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] #[doc = "## Safety"] @@ -52079,7 +49546,7 @@ pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i32" @@ -52088,7 +49555,6 @@ pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { } _vuqadds_s32(a, b.as_signed()) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] #[doc = "## Safety"] @@ -52109,7 +49575,6 @@ pub unsafe fn vusdot_laneq_s32( let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vusdot_s32(a, b, transmute(c)) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] #[doc = "## Safety"] @@ -52134,7 +49599,6 @@ pub unsafe fn vusdot_laneq_s32( let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] #[doc = "## Safety"] @@ -52155,7 +49619,6 @@ pub unsafe fn vusdotq_laneq_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vusdotq_s32(a, b, transmute(c)) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] #[doc = "## Safety"] @@ -52180,7 +49643,6 @@ pub unsafe fn vusdotq_laneq_s32( let ret_val: int32x4_t = vusdotq_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] #[doc = "## Safety"] @@ -52193,7 +49655,6 @@ pub unsafe fn vusdotq_laneq_s32( pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] #[doc = "## Safety"] @@ -52209,7 +49670,6 @@ pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] #[doc = "## Safety"] @@ -52222,7 +49682,6 @@ pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] #[doc = "## Safety"] @@ -52238,7 +49697,6 @@ pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] #[doc = "## Safety"] @@ -52251,7 +49709,6 @@ pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] #[doc = "## Safety"] @@ -52267,7 +49724,6 @@ pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] #[doc = "## Safety"] @@ -52280,7 +49736,6 @@ pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] #[doc = "## Safety"] @@ -52296,7 +49751,6 @@ pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] #[doc = "## Safety"] @@ -52309,7 +49763,6 @@ pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] #[doc = "## Safety"] @@ -52325,7 +49778,6 @@ pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] #[doc = "## Safety"] @@ -52338,7 +49790,6 @@ pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] #[doc = "## Safety"] @@ -52354,7 +49805,6 @@ pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] #[doc = "## Safety"] @@ -52367,7 +49817,6 @@ pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] #[doc = "## Safety"] @@ -52383,7 +49832,6 @@ pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] #[doc = "## Safety"] @@ -52396,7 +49844,6 @@ pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle!(a, b, [0, 2, 4, 6]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] #[doc = "## Safety"] @@ -52412,7 +49859,6 @@ pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] #[doc = "## Safety"] @@ -52425,7 +49871,6 @@ pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] #[doc = "## Safety"] @@ -52441,7 +49886,6 @@ pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] #[doc = "## Safety"] @@ -52458,7 +49902,6 @@ pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] #[doc = "## Safety"] @@ -52482,7 +49925,6 @@ pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] #[doc = "## Safety"] @@ -52495,7 +49937,6 @@ pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle!(a, b, [0, 2, 4, 6]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] #[doc = "## Safety"] @@ -52511,7 +49952,6 @@ pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] #[doc = "## Safety"] @@ -52524,7 +49964,6 @@ pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] #[doc = "## Safety"] @@ -52540,7 +49979,6 @@ pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] #[doc = "## Safety"] @@ -52553,7 +49991,6 @@ pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle!(a, b, [0, 2, 4, 6]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] #[doc = "## Safety"] @@ -52569,7 +50006,6 @@ pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] #[doc = "## Safety"] @@ -52582,7 +50018,6 @@ pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] #[doc = "## Safety"] @@ -52598,7 +50033,6 @@ pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] #[doc = "## Safety"] @@ -52615,7 +50049,6 @@ pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] #[doc = "## Safety"] @@ -52639,7 +50072,6 @@ pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] #[doc = "## Safety"] @@ -52652,7 +50084,6 @@ pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [0, 2, 4, 6]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] #[doc = "## Safety"] @@ -52668,7 +50099,6 @@ pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] #[doc = "## Safety"] @@ -52681,7 +50111,6 @@ pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] #[doc = "## Safety"] @@ -52697,7 +50126,6 @@ pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] #[doc = "## Safety"] @@ -52710,7 +50138,6 @@ pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [0, 2, 4, 6]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] #[doc = "## Safety"] @@ -52726,7 +50153,6 @@ pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] #[doc = "## Safety"] @@ -52739,7 +50165,6 @@ pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] #[doc = "## Safety"] @@ -52755,7 +50180,6 @@ pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] #[doc = "## Safety"] @@ -52772,7 +50196,6 @@ pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] #[doc = "## Safety"] @@ -52796,7 +50219,6 @@ pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] #[doc = "## Safety"] @@ -52809,7 +50231,6 @@ pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [0, 2, 4, 6]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] #[doc = "## Safety"] @@ -52825,7 +50246,6 @@ pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] #[doc = "## Safety"] @@ -52838,7 +50258,6 @@ pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] #[doc = "## Safety"] @@ -52854,7 +50273,6 @@ pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] #[doc = "## Safety"] @@ -52867,7 +50285,6 @@ pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] #[doc = "## Safety"] @@ -52883,7 +50300,6 @@ pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] #[doc = "## Safety"] @@ -52896,7 +50312,6 @@ pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] #[doc = "## Safety"] @@ -52912,7 +50327,6 @@ pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] #[doc = "## Safety"] @@ -52925,7 +50339,6 @@ pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] #[doc = "## Safety"] @@ -52941,7 +50354,6 @@ pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] #[doc = "## Safety"] @@ -52954,7 +50366,6 @@ pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] #[doc = "## Safety"] @@ -52970,7 +50381,6 @@ pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] #[doc = "## Safety"] @@ -52983,7 +50393,6 @@ pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] #[doc = "## Safety"] @@ -52999,7 +50408,6 @@ pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] #[doc = "## Safety"] @@ -53012,7 +50420,6 @@ pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] #[doc = "## Safety"] @@ -53028,7 +50435,6 @@ pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] #[doc = "## Safety"] @@ -53041,7 +50447,6 @@ pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] #[doc = "## Safety"] @@ -53057,7 +50462,6 @@ pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] #[doc = "## Safety"] @@ -53070,7 +50474,6 @@ pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle!(a, b, [1, 3, 5, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] #[doc = "## Safety"] @@ -53086,7 +50489,6 @@ pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] #[doc = "## Safety"] @@ -53099,7 +50501,6 @@ pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] #[doc = "## Safety"] @@ -53115,7 +50516,6 @@ pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] #[doc = "## Safety"] @@ -53132,7 +50532,6 @@ pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] #[doc = "## Safety"] @@ -53156,7 +50555,6 @@ pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] #[doc = "## Safety"] @@ -53169,7 +50567,6 @@ pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle!(a, b, [1, 3, 5, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] #[doc = "## Safety"] @@ -53185,7 +50582,6 @@ pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] #[doc = "## Safety"] @@ -53198,7 +50594,6 @@ pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] #[doc = "## Safety"] @@ -53214,7 +50609,6 @@ pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] #[doc = "## Safety"] @@ -53227,7 +50621,6 @@ pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle!(a, b, [1, 3, 5, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] #[doc = "## Safety"] @@ -53243,7 +50636,6 @@ pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] #[doc = "## Safety"] @@ -53256,7 +50648,6 @@ pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] #[doc = "## Safety"] @@ -53272,7 +50663,6 @@ pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] #[doc = "## Safety"] @@ -53289,7 +50679,6 @@ pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] #[doc = "## Safety"] @@ -53313,7 +50702,6 @@ pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] #[doc = "## Safety"] @@ -53326,7 +50714,6 @@ pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [1, 3, 5, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] #[doc = "## Safety"] @@ -53342,7 +50729,6 @@ pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] #[doc = "## Safety"] @@ -53355,7 +50741,6 @@ pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] #[doc = "## Safety"] @@ -53371,7 +50756,6 @@ pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] #[doc = "## Safety"] @@ -53384,7 +50768,6 @@ pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [1, 3, 5, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] #[doc = "## Safety"] @@ -53400,7 +50783,6 @@ pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] #[doc = "## Safety"] @@ -53413,7 +50795,6 @@ pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] #[doc = "## Safety"] @@ -53429,7 +50810,6 @@ pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] #[doc = "## Safety"] @@ -53446,7 +50826,6 @@ pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] #[doc = "## Safety"] @@ -53470,7 +50849,6 @@ pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] #[doc = "## Safety"] @@ -53483,7 +50861,6 @@ pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [1, 3, 5, 7]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] #[doc = "## Safety"] @@ -53499,7 +50876,6 @@ pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] #[doc = "## Safety"] @@ -53512,7 +50888,6 @@ pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] #[doc = "## Safety"] @@ -53528,7 +50903,6 @@ pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Exclusive OR and rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] #[doc = "## Safety"] @@ -53541,7 +50915,7 @@ pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(IMM6, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.xar" @@ -53550,7 +50924,6 @@ pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 } _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned() } - #[doc = "Exclusive OR and rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] #[doc = "## Safety"] @@ -53563,7 +50936,7 @@ pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(IMM6, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.xar" @@ -53575,7 +50948,6 @@ pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 let ret_val: uint64x2_t = _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] #[doc = "## Safety"] @@ -53588,7 +50960,6 @@ pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] #[doc = "## Safety"] @@ -53604,7 +50975,6 @@ pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] #[doc = "## Safety"] @@ -53617,7 +50987,6 @@ pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] #[doc = "## Safety"] @@ -53633,7 +51002,6 @@ pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] #[doc = "## Safety"] @@ -53646,7 +51014,6 @@ pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] #[doc = "## Safety"] @@ -53662,7 +51029,6 @@ pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] #[doc = "## Safety"] @@ -53675,7 +51041,6 @@ pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] #[doc = "## Safety"] @@ -53691,7 +51056,6 @@ pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] #[doc = "## Safety"] @@ -53708,7 +51072,6 @@ pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] #[doc = "## Safety"] @@ -53732,7 +51095,6 @@ pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] #[doc = "## Safety"] @@ -53745,7 +51107,6 @@ pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] #[doc = "## Safety"] @@ -53761,7 +51122,6 @@ pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] #[doc = "## Safety"] @@ -53774,7 +51134,6 @@ pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] #[doc = "## Safety"] @@ -53790,7 +51149,6 @@ pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] #[doc = "## Safety"] @@ -53803,7 +51161,6 @@ pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] #[doc = "## Safety"] @@ -53819,7 +51176,6 @@ pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] #[doc = "## Safety"] @@ -53832,7 +51188,6 @@ pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] #[doc = "## Safety"] @@ -53848,7 +51203,6 @@ pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] #[doc = "## Safety"] @@ -53861,7 +51215,6 @@ pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] #[doc = "## Safety"] @@ -53877,7 +51230,6 @@ pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] #[doc = "## Safety"] @@ -53890,7 +51242,6 @@ pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] #[doc = "## Safety"] @@ -53906,7 +51257,6 @@ pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] #[doc = "## Safety"] @@ -53923,7 +51273,6 @@ pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] #[doc = "## Safety"] @@ -53947,7 +51296,6 @@ pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] #[doc = "## Safety"] @@ -53960,7 +51308,6 @@ pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] #[doc = "## Safety"] @@ -53976,7 +51323,6 @@ pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] #[doc = "## Safety"] @@ -53989,7 +51335,6 @@ pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] #[doc = "## Safety"] @@ -54005,7 +51350,6 @@ pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] #[doc = "## Safety"] @@ -54018,7 +51362,6 @@ pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] #[doc = "## Safety"] @@ -54034,7 +51377,6 @@ pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] #[doc = "## Safety"] @@ -54047,7 +51389,6 @@ pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] #[doc = "## Safety"] @@ -54063,7 +51404,6 @@ pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] #[doc = "## Safety"] @@ -54076,7 +51416,6 @@ pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] #[doc = "## Safety"] @@ -54092,7 +51431,6 @@ pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] #[doc = "## Safety"] @@ -54105,7 +51443,6 @@ pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] #[doc = "## Safety"] @@ -54121,7 +51458,6 @@ pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] #[doc = "## Safety"] @@ -54138,7 +51474,6 @@ pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] #[doc = "## Safety"] @@ -54162,7 +51497,6 @@ pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] #[doc = "## Safety"] @@ -54175,7 +51509,6 @@ pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] #[doc = "## Safety"] @@ -54191,7 +51524,6 @@ pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] #[doc = "## Safety"] @@ -54204,7 +51536,6 @@ pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] #[doc = "## Safety"] @@ -54220,7 +51551,6 @@ pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] #[doc = "## Safety"] @@ -54233,7 +51563,6 @@ pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [0, 2]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] #[doc = "## Safety"] @@ -54249,7 +51578,6 @@ pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] #[doc = "## Safety"] @@ -54262,7 +51590,6 @@ pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] #[doc = "## Safety"] @@ -54278,7 +51605,6 @@ pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] #[doc = "## Safety"] @@ -54291,7 +51617,6 @@ pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] #[doc = "## Safety"] @@ -54307,7 +51632,6 @@ pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] #[doc = "## Safety"] @@ -54320,7 +51644,6 @@ pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] #[doc = "## Safety"] @@ -54336,7 +51659,6 @@ pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] #[doc = "## Safety"] @@ -54349,7 +51671,6 @@ pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] #[doc = "## Safety"] @@ -54365,7 +51686,6 @@ pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] #[doc = "## Safety"] @@ -54382,7 +51702,6 @@ pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] #[doc = "## Safety"] @@ -54406,7 +51725,6 @@ pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] #[doc = "## Safety"] @@ -54419,7 +51737,6 @@ pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] #[doc = "## Safety"] @@ -54435,7 +51752,6 @@ pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] #[doc = "## Safety"] @@ -54448,7 +51764,6 @@ pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] #[doc = "## Safety"] @@ -54464,7 +51779,6 @@ pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] #[doc = "## Safety"] @@ -54477,7 +51791,6 @@ pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] #[doc = "## Safety"] @@ -54493,7 +51806,6 @@ pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] #[doc = "## Safety"] @@ -54506,7 +51818,6 @@ pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] #[doc = "## Safety"] @@ -54522,7 +51833,6 @@ pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] #[doc = "## Safety"] @@ -54535,7 +51845,6 @@ pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] #[doc = "## Safety"] @@ -54551,7 +51860,6 @@ pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] #[doc = "## Safety"] @@ -54564,7 +51872,6 @@ pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] #[doc = "## Safety"] @@ -54580,7 +51887,6 @@ pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] #[doc = "## Safety"] @@ -54597,7 +51903,6 @@ pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] #[doc = "## Safety"] @@ -54621,7 +51926,6 @@ pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] #[doc = "## Safety"] @@ -54634,7 +51938,6 @@ pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] #[doc = "## Safety"] @@ -54650,7 +51953,6 @@ pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] #[doc = "## Safety"] @@ -54663,7 +51965,6 @@ pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] #[doc = "## Safety"] @@ -54679,7 +51980,6 @@ pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] #[doc = "## Safety"] @@ -54692,7 +51992,6 @@ pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] #[doc = "## Safety"] @@ -54708,7 +52007,6 @@ pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] #[doc = "## Safety"] @@ -54721,7 +52019,6 @@ pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] #[doc = "## Safety"] @@ -54737,7 +52034,6 @@ pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] #[doc = "## Safety"] @@ -54750,7 +52046,6 @@ pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] #[doc = "## Safety"] @@ -54766,7 +52061,6 @@ pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] #[doc = "## Safety"] @@ -54779,7 +52073,6 @@ pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] #[doc = "## Safety"] @@ -54795,7 +52088,6 @@ pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] #[doc = "## Safety"] @@ -54812,7 +52104,6 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] #[doc = "## Safety"] @@ -54836,7 +52127,6 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] #[doc = "## Safety"] @@ -54849,7 +52139,6 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] #[doc = "## Safety"] @@ -54865,7 +52154,6 @@ pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] #[doc = "## Safety"] @@ -54878,7 +52166,6 @@ pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] #[doc = "## Safety"] @@ -54894,7 +52181,6 @@ pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] #[doc = "## Safety"] @@ -54907,7 +52193,6 @@ pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [1, 3]) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] #[doc = "## Safety"] diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 074613851c..ca0a5b2715 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -1,9 +1,9 @@ // This code is automatically generated. DO NOT MODIFY. // -// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: // // ``` -// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec // ``` #![allow(improper_ctypes)] @@ -29,7 +29,7 @@ use super::*; stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32b" @@ -39,7 +39,6 @@ pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { } ___crc32b(crc.as_signed(), data.as_signed() as i32).as_unsigned() } - #[doc = "CRC32-C single round checksum for bytes (8 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cb)"] #[doc = "## Safety"] @@ -57,7 +56,7 @@ pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32cb" @@ -67,7 +66,6 @@ pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { } ___crc32cb(crc.as_signed(), data.as_signed() as i32).as_unsigned() } - #[doc = "CRC32-C single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"] #[doc = "## Safety"] @@ -84,13 +82,12 @@ pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { let a: i32 = crc as i32; let b: i32 = (data & 0xFFFFFFFF).as_signed() as i32; let c: i32 = (data >> 32).as_signed() as i32; - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] fn ___crc32cw(crc: i32, data: i32) -> i32; } ___crc32cw(___crc32cw(a, b), c).as_unsigned() as u32 } - #[doc = "CRC32-C single round checksum for bytes (16 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32ch)"] #[doc = "## Safety"] @@ -108,7 +105,7 @@ pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32ch" @@ -118,7 +115,6 @@ pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { } ___crc32ch(crc.as_signed(), data.as_signed() as i32).as_unsigned() } - #[doc = "CRC32-C single round checksum for bytes (32 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cw)"] #[doc = "## Safety"] @@ -136,7 +132,7 @@ pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32cw" @@ -146,7 +142,6 @@ pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { } ___crc32cw(crc.as_signed(), data.as_signed()).as_unsigned() } - #[doc = "CRC32 single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] #[doc = "## Safety"] @@ -163,13 +158,12 @@ pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { let a: i32 = crc as i32; let b: i32 = (data & 0xFFFFFFFF).as_signed() as i32; let c: i32 = (data >> 32).as_signed() as i32; - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] fn ___crc32w(crc: i32, data: i32) -> i32; } ___crc32w(___crc32w(a, b), c).as_unsigned() } - #[doc = "CRC32 single round checksum for bytes (16 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32h)"] #[doc = "## Safety"] @@ -187,7 +181,7 @@ pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32h" @@ -197,7 +191,6 @@ pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { } ___crc32h(crc.as_signed(), data.as_signed() as i32).as_unsigned() } - #[doc = "CRC32 single round checksum for bytes (32 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32w)"] #[doc = "## Safety"] @@ -215,7 +208,7 @@ pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { stable(feature = "stdarch_aarch64_crc32", since = "1.80.0") )] pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32w" @@ -225,7 +218,6 @@ pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { } ___crc32w(crc.as_signed(), data.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s8)"] #[doc = "## Safety"] @@ -240,13 +232,12 @@ pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i16.v8i8")] fn _priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t; } _priv_vpadal_s8(a, b) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s8)"] #[doc = "## Safety"] @@ -261,7 +252,7 @@ unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i16.v8i8")] fn _priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t; } @@ -270,7 +261,6 @@ unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { let ret_val: int16x4_t = _priv_vpadal_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s8)"] #[doc = "## Safety"] @@ -285,13 +275,12 @@ unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v8i16.v16i8")] fn _priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; } _priv_vpadalq_s8(a, b) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s8)"] #[doc = "## Safety"] @@ -306,7 +295,7 @@ unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v8i16.v16i8")] fn _priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; } @@ -315,7 +304,6 @@ unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = _priv_vpadalq_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s16)"] #[doc = "## Safety"] @@ -330,13 +318,12 @@ unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i32.v4i16")] fn _priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t; } _priv_vpadal_s16(a, b) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s16)"] #[doc = "## Safety"] @@ -351,7 +338,7 @@ unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i32.v4i16")] fn _priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t; } @@ -360,7 +347,6 @@ unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { let ret_val: int32x2_t = _priv_vpadal_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s16)"] #[doc = "## Safety"] @@ -375,13 +361,12 @@ unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i32.v8i16")] fn _priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; } _priv_vpadalq_s16(a, b) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s16)"] #[doc = "## Safety"] @@ -396,7 +381,7 @@ unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i32.v8i16")] fn _priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; } @@ -405,7 +390,6 @@ unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = _priv_vpadalq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s32)"] #[doc = "## Safety"] @@ -420,13 +404,12 @@ unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v1i64.v2i32")] fn _priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } _priv_vpadal_s32(a, b) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s32)"] #[doc = "## Safety"] @@ -441,14 +424,13 @@ unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v1i64.v2i32")] fn _priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); _priv_vpadal_s32(a, b) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s32)"] #[doc = "## Safety"] @@ -463,13 +445,12 @@ unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i64.v4i32")] fn _priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; } _priv_vpadalq_s32(a, b) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s32)"] #[doc = "## Safety"] @@ -484,7 +465,7 @@ unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i64.v4i32")] fn _priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; } @@ -493,7 +474,6 @@ unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = _priv_vpadalq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u8)"] #[doc = "## Safety"] @@ -508,13 +488,12 @@ unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i16.v8i8")] fn _priv_vpadal_u8(a: int16x4_t, b: int8x8_t) -> int16x4_t; } _priv_vpadal_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u8)"] #[doc = "## Safety"] @@ -529,7 +508,7 @@ unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i16.v8i8")] fn _priv_vpadal_u8(a: int16x4_t, b: int8x8_t) -> int16x4_t; } @@ -538,7 +517,6 @@ unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { let ret_val: uint16x4_t = _priv_vpadal_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] #[doc = "## Safety"] @@ -553,13 +531,12 @@ unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v8i16.v16i8")] fn _priv_vpadalq_u8(a: int16x8_t, b: int8x16_t) -> int16x8_t; } _priv_vpadalq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] #[doc = "## Safety"] @@ -574,7 +551,7 @@ unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v8i16.v16i8")] fn _priv_vpadalq_u8(a: int16x8_t, b: int8x16_t) -> int16x8_t; } @@ -583,7 +560,6 @@ unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = _priv_vpadalq_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] #[doc = "## Safety"] @@ -598,13 +574,12 @@ unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i32.v4i16")] fn _priv_vpadal_u16(a: int32x2_t, b: int16x4_t) -> int32x2_t; } _priv_vpadal_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] #[doc = "## Safety"] @@ -619,7 +594,7 @@ unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i32.v4i16")] fn _priv_vpadal_u16(a: int32x2_t, b: int16x4_t) -> int32x2_t; } @@ -628,7 +603,6 @@ unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { let ret_val: uint32x2_t = _priv_vpadal_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] #[doc = "## Safety"] @@ -643,13 +617,12 @@ unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i32.v8i16")] fn _priv_vpadalq_u16(a: int32x4_t, b: int16x8_t) -> int32x4_t; } _priv_vpadalq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] #[doc = "## Safety"] @@ -664,7 +637,7 @@ unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i32.v8i16")] fn _priv_vpadalq_u16(a: int32x4_t, b: int16x8_t) -> int32x4_t; } @@ -673,7 +646,6 @@ unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = _priv_vpadalq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] #[doc = "## Safety"] @@ -688,13 +660,12 @@ unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v1i64.v2i32")] fn _priv_vpadal_u32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } _priv_vpadal_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] #[doc = "## Safety"] @@ -709,14 +680,13 @@ unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v1i64.v2i32")] fn _priv_vpadal_u32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); _priv_vpadal_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u32)"] #[doc = "## Safety"] @@ -731,13 +701,12 @@ unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i64.v4i32")] fn _priv_vpadalq_u32(a: int64x2_t, b: int32x4_t) -> int64x2_t; } _priv_vpadalq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u32)"] #[doc = "## Safety"] @@ -752,7 +721,7 @@ unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i64.v4i32")] fn _priv_vpadalq_u32(a: int64x2_t, b: int32x4_t) -> int64x2_t; } @@ -761,7 +730,6 @@ unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = _priv_vpadalq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] #[doc = "## Safety"] @@ -788,7 +756,6 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { let e: uint8x8_t = simd_cast(d); simd_add(a, simd_cast(e)) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] #[doc = "## Safety"] @@ -819,7 +786,6 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_add(a, simd_cast(e)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] #[doc = "## Safety"] @@ -846,7 +812,6 @@ pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { let e: uint16x4_t = simd_cast(d); simd_add(a, simd_cast(e)) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] #[doc = "## Safety"] @@ -877,7 +842,6 @@ pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_add(a, simd_cast(e)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] #[doc = "## Safety"] @@ -904,7 +868,6 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let e: uint32x2_t = simd_cast(d); simd_add(a, simd_cast(e)) } - #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] #[doc = "## Safety"] @@ -935,7 +898,6 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_add(a, simd_cast(e)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] #[doc = "## Safety"] @@ -961,7 +923,6 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t let d: uint8x8_t = vabd_u8(b, c); simd_add(a, simd_cast(d)) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] #[doc = "## Safety"] @@ -991,7 +952,6 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t let ret_val: uint16x8_t = simd_add(a, simd_cast(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] #[doc = "## Safety"] @@ -1017,7 +977,6 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 let d: uint16x4_t = vabd_u16(b, c); simd_add(a, simd_cast(d)) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] #[doc = "## Safety"] @@ -1047,7 +1006,6 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 let ret_val: uint32x4_t = simd_add(a, simd_cast(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] #[doc = "## Safety"] @@ -1073,7 +1031,6 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 let d: uint32x2_t = vabd_u32(b, c); simd_add(a, simd_cast(d)) } - #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] #[doc = "## Safety"] @@ -1103,7 +1060,6 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 let ret_val: uint64x2_t = simd_add(a, simd_cast(d)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] #[doc = "## Safety"] @@ -1126,7 +1082,7 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1136,7 +1092,6 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vabd_f32(a, b) } - #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] #[doc = "## Safety"] @@ -1159,7 +1114,7 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1172,7 +1127,6 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vabd_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] #[doc = "## Safety"] @@ -1195,7 +1149,7 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1205,7 +1159,6 @@ pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vabdq_f32(a, b) } - #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] #[doc = "## Safety"] @@ -1228,7 +1181,7 @@ pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1241,7 +1194,6 @@ pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vabdq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] #[doc = "## Safety"] @@ -1264,7 +1216,7 @@ pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v8i8" @@ -1274,7 +1226,6 @@ pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vabd_s8(a, b) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] #[doc = "## Safety"] @@ -1297,7 +1248,7 @@ pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v8i8" @@ -1310,7 +1261,6 @@ pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vabd_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] #[doc = "## Safety"] @@ -1333,7 +1283,7 @@ pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v16i8" @@ -1343,7 +1293,6 @@ pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vabdq_s8(a, b) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] #[doc = "## Safety"] @@ -1366,7 +1315,7 @@ pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v16i8" @@ -1383,7 +1332,6 @@ pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] #[doc = "## Safety"] @@ -1406,7 +1354,7 @@ pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v4i16" @@ -1416,7 +1364,6 @@ pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vabd_s16(a, b) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] #[doc = "## Safety"] @@ -1439,7 +1386,7 @@ pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v4i16" @@ -1452,7 +1399,6 @@ pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vabd_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] #[doc = "## Safety"] @@ -1475,7 +1421,7 @@ pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v8i16" @@ -1485,7 +1431,6 @@ pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vabdq_s16(a, b) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] #[doc = "## Safety"] @@ -1508,7 +1453,7 @@ pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v8i16" @@ -1521,7 +1466,6 @@ pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vabdq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] #[doc = "## Safety"] @@ -1544,7 +1488,7 @@ pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v2i32" @@ -1554,7 +1498,6 @@ pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vabd_s32(a, b) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] #[doc = "## Safety"] @@ -1577,7 +1520,7 @@ pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v2i32" @@ -1590,7 +1533,6 @@ pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vabd_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] #[doc = "## Safety"] @@ -1613,7 +1555,7 @@ pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v4i32" @@ -1623,7 +1565,6 @@ pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vabdq_s32(a, b) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] #[doc = "## Safety"] @@ -1646,7 +1587,7 @@ pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sabd.v4i32" @@ -1659,7 +1600,6 @@ pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vabdq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] #[doc = "## Safety"] @@ -1682,7 +1622,7 @@ pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v8i8" @@ -1692,7 +1632,6 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] #[doc = "## Safety"] @@ -1715,7 +1654,7 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v8i8" @@ -1728,7 +1667,6 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] #[doc = "## Safety"] @@ -1751,7 +1689,7 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v16i8" @@ -1761,7 +1699,6 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] #[doc = "## Safety"] @@ -1784,7 +1721,7 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v16i8" @@ -1801,7 +1738,6 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] #[doc = "## Safety"] @@ -1824,7 +1760,7 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v4i16" @@ -1834,7 +1770,6 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] #[doc = "## Safety"] @@ -1857,7 +1792,7 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v4i16" @@ -1870,7 +1805,6 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] #[doc = "## Safety"] @@ -1893,7 +1827,7 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v8i16" @@ -1903,7 +1837,6 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] #[doc = "## Safety"] @@ -1926,7 +1859,7 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v8i16" @@ -1939,7 +1872,6 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] #[doc = "## Safety"] @@ -1962,7 +1894,7 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v2i32" @@ -1972,7 +1904,6 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] #[doc = "## Safety"] @@ -1995,7 +1926,7 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v2i32" @@ -2008,7 +1939,6 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] #[doc = "## Safety"] @@ -2031,7 +1961,7 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v4i32" @@ -2041,7 +1971,6 @@ pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] #[doc = "## Safety"] @@ -2064,7 +1993,7 @@ pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uabd.v4i32" @@ -2077,7 +2006,6 @@ pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] #[doc = "## Safety"] @@ -2103,7 +2031,6 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let c: uint8x8_t = simd_cast(vabd_s8(a, b)); simd_cast(c) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] #[doc = "## Safety"] @@ -2132,7 +2059,6 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_cast(c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] #[doc = "## Safety"] @@ -2158,7 +2084,6 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let c: uint16x4_t = simd_cast(vabd_s16(a, b)); simd_cast(c) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] #[doc = "## Safety"] @@ -2187,7 +2112,6 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_cast(c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] #[doc = "## Safety"] @@ -2213,7 +2137,6 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: uint32x2_t = simd_cast(vabd_s32(a, b)); simd_cast(c) } - #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] #[doc = "## Safety"] @@ -2242,7 +2165,6 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_cast(c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] #[doc = "## Safety"] @@ -2267,7 +2189,6 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { simd_cast(vabd_u8(a, b)) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] #[doc = "## Safety"] @@ -2295,7 +2216,6 @@ pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_cast(vabd_u8(a, b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] #[doc = "## Safety"] @@ -2320,7 +2240,6 @@ pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { simd_cast(vabd_u16(a, b)) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] #[doc = "## Safety"] @@ -2348,7 +2267,6 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_cast(vabd_u16(a, b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] #[doc = "## Safety"] @@ -2373,7 +2291,6 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { simd_cast(vabd_u32(a, b)) } - #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] #[doc = "## Safety"] @@ -2401,7 +2318,6 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_cast(vabd_u32(a, b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] #[doc = "## Safety"] @@ -2426,7 +2342,6 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { simd_fabs(a) } - #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] #[doc = "## Safety"] @@ -2453,7 +2368,6 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_fabs(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] #[doc = "## Safety"] @@ -2478,7 +2392,6 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { simd_fabs(a) } - #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] #[doc = "## Safety"] @@ -2505,7 +2418,6 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_fabs(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] #[doc = "## Safety"] @@ -2528,7 +2440,7 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v8i8" @@ -2538,7 +2450,6 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { } _vabs_s8(a) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] #[doc = "## Safety"] @@ -2561,7 +2472,7 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v8i8" @@ -2573,7 +2484,6 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vabs_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] #[doc = "## Safety"] @@ -2596,7 +2506,7 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v16i8" @@ -2606,7 +2516,6 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { } _vabsq_s8(a) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] #[doc = "## Safety"] @@ -2629,7 +2538,7 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v16i8" @@ -2645,7 +2554,6 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] #[doc = "## Safety"] @@ -2668,7 +2576,7 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v4i16" @@ -2678,7 +2586,6 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { } _vabs_s16(a) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] #[doc = "## Safety"] @@ -2701,7 +2608,7 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v4i16" @@ -2713,7 +2620,6 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vabs_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] #[doc = "## Safety"] @@ -2736,7 +2642,7 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v8i16" @@ -2746,7 +2652,6 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { } _vabsq_s16(a) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] #[doc = "## Safety"] @@ -2769,7 +2674,7 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v8i16" @@ -2781,7 +2686,6 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vabsq_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] #[doc = "## Safety"] @@ -2804,7 +2708,7 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v2i32" @@ -2814,7 +2718,6 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { } _vabs_s32(a) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] #[doc = "## Safety"] @@ -2837,7 +2740,7 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v2i32" @@ -2849,7 +2752,6 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vabs_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] #[doc = "## Safety"] @@ -2872,7 +2774,7 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v4i32" @@ -2882,7 +2784,6 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { } _vabsq_s32(a) } - #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] #[doc = "## Safety"] @@ -2905,7 +2806,7 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.abs.v4i32" @@ -2917,7 +2818,6 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vabsq_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] #[doc = "## Safety"] @@ -2942,7 +2842,6 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_xor(a, b) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] #[doc = "## Safety"] @@ -2970,7 +2869,6 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] #[doc = "## Safety"] @@ -2995,7 +2893,6 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_xor(a, b) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] #[doc = "## Safety"] @@ -3027,7 +2924,6 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] #[doc = "## Safety"] @@ -3052,7 +2948,6 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_xor(a, b) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] #[doc = "## Safety"] @@ -3080,7 +2975,6 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] #[doc = "## Safety"] @@ -3105,7 +2999,6 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_xor(a, b) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] #[doc = "## Safety"] @@ -3133,7 +3026,6 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)"] #[doc = "## Safety"] @@ -3157,7 +3049,6 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { simd_xor(a, b) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] #[doc = "## Safety"] @@ -3182,7 +3073,6 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_xor(a, b) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] #[doc = "## Safety"] @@ -3210,7 +3100,6 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] #[doc = "## Safety"] @@ -3234,7 +3123,6 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { a ^ b } - #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] #[doc = "## Safety"] @@ -3253,7 +3141,7 @@ pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aesd" @@ -3263,7 +3151,6 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { } _vaesdq_u8(data.as_signed(), key.as_signed()).as_unsigned() } - #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] #[doc = "## Safety"] @@ -3282,7 +3169,7 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aesd" @@ -3307,7 +3194,6 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] #[doc = "## Safety"] @@ -3326,7 +3212,7 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aese" @@ -3336,7 +3222,6 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { } _vaeseq_u8(data.as_signed(), key.as_signed()).as_unsigned() } - #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] #[doc = "## Safety"] @@ -3355,7 +3240,7 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aese" @@ -3380,7 +3265,6 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "AES inverse mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] #[doc = "## Safety"] @@ -3399,7 +3283,7 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aesimc" @@ -3409,7 +3293,6 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { } _vaesimcq_u8(data.as_signed()).as_unsigned() } - #[doc = "AES inverse mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] #[doc = "## Safety"] @@ -3428,7 +3311,7 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aesimc" @@ -3448,7 +3331,6 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "AES mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] #[doc = "## Safety"] @@ -3467,7 +3349,7 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aesmc" @@ -3477,7 +3359,6 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { } _vaesmcq_u8(data.as_signed()).as_unsigned() } - #[doc = "AES mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] #[doc = "## Safety"] @@ -3496,7 +3377,7 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.aesmc" @@ -3516,7 +3397,6 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] #[doc = "## Safety"] @@ -3541,7 +3421,6 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] #[doc = "## Safety"] @@ -3569,7 +3448,6 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] #[doc = "## Safety"] @@ -3594,7 +3472,6 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] #[doc = "## Safety"] @@ -3626,7 +3503,6 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] #[doc = "## Safety"] @@ -3651,7 +3527,6 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] #[doc = "## Safety"] @@ -3679,7 +3554,6 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] #[doc = "## Safety"] @@ -3704,7 +3578,6 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] #[doc = "## Safety"] @@ -3732,7 +3605,6 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] #[doc = "## Safety"] @@ -3757,7 +3629,6 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] #[doc = "## Safety"] @@ -3785,7 +3656,6 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] #[doc = "## Safety"] @@ -3810,7 +3680,6 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] #[doc = "## Safety"] @@ -3838,7 +3707,6 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] #[doc = "## Safety"] @@ -3862,7 +3730,6 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] #[doc = "## Safety"] @@ -3887,7 +3754,6 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] #[doc = "## Safety"] @@ -3915,7 +3781,6 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] #[doc = "## Safety"] @@ -3940,7 +3805,6 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] #[doc = "## Safety"] @@ -3968,7 +3832,6 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] #[doc = "## Safety"] @@ -3993,7 +3856,6 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] #[doc = "## Safety"] @@ -4025,7 +3887,6 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] #[doc = "## Safety"] @@ -4050,7 +3911,6 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] #[doc = "## Safety"] @@ -4078,7 +3938,6 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] #[doc = "## Safety"] @@ -4103,7 +3962,6 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] #[doc = "## Safety"] @@ -4131,7 +3989,6 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] #[doc = "## Safety"] @@ -4156,7 +4013,6 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] #[doc = "## Safety"] @@ -4184,7 +4040,6 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] #[doc = "## Safety"] @@ -4209,7 +4064,6 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] #[doc = "## Safety"] @@ -4237,7 +4091,6 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] #[doc = "## Safety"] @@ -4261,7 +4114,6 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] #[doc = "## Safety"] @@ -4286,7 +4138,6 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_and(a, b) } - #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] #[doc = "## Safety"] @@ -4314,7 +4165,6 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_and(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] #[doc = "## Safety"] @@ -4337,7 +4187,7 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4347,7 +4197,6 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } _vcage_f32(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] #[doc = "## Safety"] @@ -4370,7 +4219,7 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4383,7 +4232,6 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcage_f32(a, b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] #[doc = "## Safety"] @@ -4406,7 +4254,7 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4416,7 +4264,6 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } _vcageq_f32(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] #[doc = "## Safety"] @@ -4439,7 +4286,7 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4452,7 +4299,6 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcageq_f32(a, b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] #[doc = "## Safety"] @@ -4475,7 +4321,7 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4485,7 +4331,6 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } _vcagt_f32(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] #[doc = "## Safety"] @@ -4508,7 +4353,7 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4521,7 +4366,6 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcagt_f32(a, b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] #[doc = "## Safety"] @@ -4544,7 +4388,7 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4554,7 +4398,6 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } _vcagtq_f32(a, b).as_unsigned() } - #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] #[doc = "## Safety"] @@ -4577,7 +4420,7 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -4590,7 +4433,6 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcagtq_f32(a, b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] #[doc = "## Safety"] @@ -4615,7 +4457,6 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcage_f32(b, a) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] #[doc = "## Safety"] @@ -4643,7 +4484,6 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = vcage_f32(b, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] #[doc = "## Safety"] @@ -4668,7 +4508,6 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcageq_f32(b, a) } - #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] #[doc = "## Safety"] @@ -4696,7 +4535,6 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = vcageq_f32(b, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] #[doc = "## Safety"] @@ -4721,7 +4559,6 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcagt_f32(b, a) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] #[doc = "## Safety"] @@ -4749,7 +4586,6 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = vcagt_f32(b, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] #[doc = "## Safety"] @@ -4774,7 +4610,6 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcagtq_f32(b, a) } - #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] #[doc = "## Safety"] @@ -4802,7 +4637,6 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = vcagtq_f32(b, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] #[doc = "## Safety"] @@ -4827,7 +4661,6 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_eq(a, b) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] #[doc = "## Safety"] @@ -4855,7 +4688,6 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] #[doc = "## Safety"] @@ -4880,7 +4712,6 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_eq(a, b) } - #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] #[doc = "## Safety"] @@ -4908,7 +4739,6 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] #[doc = "## Safety"] @@ -4933,7 +4763,6 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] #[doc = "## Safety"] @@ -4961,7 +4790,6 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] #[doc = "## Safety"] @@ -4986,7 +4814,6 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] #[doc = "## Safety"] @@ -5018,7 +4845,6 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] #[doc = "## Safety"] @@ -5043,7 +4869,6 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] #[doc = "## Safety"] @@ -5071,7 +4896,6 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] #[doc = "## Safety"] @@ -5096,7 +4920,6 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] #[doc = "## Safety"] @@ -5124,7 +4947,6 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] #[doc = "## Safety"] @@ -5149,7 +4971,6 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] #[doc = "## Safety"] @@ -5177,7 +4998,6 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] #[doc = "## Safety"] @@ -5202,7 +5022,6 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] #[doc = "## Safety"] @@ -5230,7 +5049,6 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] #[doc = "## Safety"] @@ -5255,7 +5073,6 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] #[doc = "## Safety"] @@ -5283,7 +5100,6 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] #[doc = "## Safety"] @@ -5308,7 +5124,6 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] #[doc = "## Safety"] @@ -5340,7 +5155,6 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] #[doc = "## Safety"] @@ -5365,7 +5179,6 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] #[doc = "## Safety"] @@ -5393,7 +5206,6 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] #[doc = "## Safety"] @@ -5418,7 +5230,6 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] #[doc = "## Safety"] @@ -5446,7 +5257,6 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] #[doc = "## Safety"] @@ -5471,7 +5281,6 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] #[doc = "## Safety"] @@ -5499,7 +5308,6 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] #[doc = "## Safety"] @@ -5524,7 +5332,6 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] #[doc = "## Safety"] @@ -5552,7 +5359,6 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] #[doc = "## Safety"] @@ -5577,7 +5383,6 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] #[doc = "## Safety"] @@ -5605,7 +5410,6 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_eq(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] #[doc = "## Safety"] @@ -5630,7 +5434,6 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { simd_eq(a, b) } - #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] #[doc = "## Safety"] @@ -5662,7 +5465,6 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] #[doc = "## Safety"] @@ -5687,7 +5489,6 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_ge(a, b) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] #[doc = "## Safety"] @@ -5715,7 +5516,6 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] #[doc = "## Safety"] @@ -5740,7 +5540,6 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_ge(a, b) } - #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] #[doc = "## Safety"] @@ -5768,7 +5567,6 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] #[doc = "## Safety"] @@ -5793,7 +5591,6 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] #[doc = "## Safety"] @@ -5821,7 +5618,6 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] #[doc = "## Safety"] @@ -5846,7 +5642,6 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] #[doc = "## Safety"] @@ -5878,7 +5673,6 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] #[doc = "## Safety"] @@ -5903,7 +5697,6 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] #[doc = "## Safety"] @@ -5931,7 +5724,6 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] #[doc = "## Safety"] @@ -5956,7 +5748,6 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] #[doc = "## Safety"] @@ -5984,7 +5775,6 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] #[doc = "## Safety"] @@ -6009,7 +5799,6 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] #[doc = "## Safety"] @@ -6037,7 +5826,6 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] #[doc = "## Safety"] @@ -6062,7 +5850,6 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_ge(a, b) } - #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] #[doc = "## Safety"] @@ -6090,7 +5877,6 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] #[doc = "## Safety"] @@ -6115,7 +5901,6 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] #[doc = "## Safety"] @@ -6143,7 +5928,6 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] #[doc = "## Safety"] @@ -6168,7 +5952,6 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] #[doc = "## Safety"] @@ -6200,7 +5983,6 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] #[doc = "## Safety"] @@ -6225,7 +6007,6 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] #[doc = "## Safety"] @@ -6253,7 +6034,6 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] #[doc = "## Safety"] @@ -6278,7 +6058,6 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] #[doc = "## Safety"] @@ -6306,7 +6085,6 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] #[doc = "## Safety"] @@ -6331,7 +6109,6 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] #[doc = "## Safety"] @@ -6359,7 +6136,6 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] #[doc = "## Safety"] @@ -6384,7 +6160,6 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_ge(a, b) } - #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] #[doc = "## Safety"] @@ -6412,7 +6187,6 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_ge(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] #[doc = "## Safety"] @@ -6437,7 +6211,6 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_gt(a, b) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] #[doc = "## Safety"] @@ -6465,7 +6238,6 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] #[doc = "## Safety"] @@ -6490,7 +6262,6 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_gt(a, b) } - #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] #[doc = "## Safety"] @@ -6518,7 +6289,6 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] #[doc = "## Safety"] @@ -6543,7 +6313,6 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] #[doc = "## Safety"] @@ -6571,7 +6340,6 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] #[doc = "## Safety"] @@ -6596,7 +6364,6 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] #[doc = "## Safety"] @@ -6628,7 +6395,6 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] #[doc = "## Safety"] @@ -6653,7 +6419,6 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] #[doc = "## Safety"] @@ -6681,7 +6446,6 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] #[doc = "## Safety"] @@ -6706,7 +6470,6 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] #[doc = "## Safety"] @@ -6734,7 +6497,6 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] #[doc = "## Safety"] @@ -6759,7 +6521,6 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] #[doc = "## Safety"] @@ -6787,7 +6548,6 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] #[doc = "## Safety"] @@ -6812,7 +6572,6 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_gt(a, b) } - #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] #[doc = "## Safety"] @@ -6840,7 +6599,6 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] #[doc = "## Safety"] @@ -6865,7 +6623,6 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] #[doc = "## Safety"] @@ -6893,7 +6650,6 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] #[doc = "## Safety"] @@ -6918,7 +6674,6 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] #[doc = "## Safety"] @@ -6950,7 +6705,6 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] #[doc = "## Safety"] @@ -6975,7 +6729,6 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] #[doc = "## Safety"] @@ -7003,7 +6756,6 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] #[doc = "## Safety"] @@ -7028,7 +6780,6 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] #[doc = "## Safety"] @@ -7056,7 +6807,6 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] #[doc = "## Safety"] @@ -7081,7 +6831,6 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] #[doc = "## Safety"] @@ -7109,7 +6858,6 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] #[doc = "## Safety"] @@ -7134,7 +6882,6 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_gt(a, b) } - #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] #[doc = "## Safety"] @@ -7162,7 +6909,6 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_gt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] #[doc = "## Safety"] @@ -7187,7 +6933,6 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_le(a, b) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] #[doc = "## Safety"] @@ -7215,7 +6960,6 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] #[doc = "## Safety"] @@ -7240,7 +6984,6 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_le(a, b) } - #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] #[doc = "## Safety"] @@ -7268,7 +7011,6 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] #[doc = "## Safety"] @@ -7293,7 +7035,6 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] #[doc = "## Safety"] @@ -7321,7 +7062,6 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] #[doc = "## Safety"] @@ -7346,7 +7086,6 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] #[doc = "## Safety"] @@ -7378,7 +7117,6 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] #[doc = "## Safety"] @@ -7403,7 +7141,6 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] #[doc = "## Safety"] @@ -7431,7 +7168,6 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] #[doc = "## Safety"] @@ -7456,7 +7192,6 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] #[doc = "## Safety"] @@ -7484,7 +7219,6 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] #[doc = "## Safety"] @@ -7509,7 +7243,6 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] #[doc = "## Safety"] @@ -7537,7 +7270,6 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] #[doc = "## Safety"] @@ -7562,7 +7294,6 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_le(a, b) } - #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] #[doc = "## Safety"] @@ -7590,7 +7321,6 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] #[doc = "## Safety"] @@ -7615,7 +7345,6 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] #[doc = "## Safety"] @@ -7643,7 +7372,6 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] #[doc = "## Safety"] @@ -7668,7 +7396,6 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] #[doc = "## Safety"] @@ -7700,7 +7427,6 @@ pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] #[doc = "## Safety"] @@ -7725,7 +7451,6 @@ pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] #[doc = "## Safety"] @@ -7753,7 +7478,6 @@ pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] #[doc = "## Safety"] @@ -7778,7 +7502,6 @@ pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] #[doc = "## Safety"] @@ -7806,7 +7529,6 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] #[doc = "## Safety"] @@ -7831,7 +7553,6 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] #[doc = "## Safety"] @@ -7859,7 +7580,6 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] #[doc = "## Safety"] @@ -7884,7 +7604,6 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_le(a, b) } - #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] #[doc = "## Safety"] @@ -7912,7 +7631,6 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_le(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] #[doc = "## Safety"] @@ -7935,7 +7653,7 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -7945,7 +7663,6 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { } _vcls_s8(a) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] #[doc = "## Safety"] @@ -7968,7 +7685,7 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -7980,7 +7697,6 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vcls_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] #[doc = "## Safety"] @@ -8003,7 +7719,7 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8013,7 +7729,6 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { } _vclsq_s8(a) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] #[doc = "## Safety"] @@ -8036,7 +7751,7 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8052,7 +7767,6 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] #[doc = "## Safety"] @@ -8075,7 +7789,7 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8085,7 +7799,6 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { } _vcls_s16(a) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] #[doc = "## Safety"] @@ -8108,7 +7821,7 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8120,7 +7833,6 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vcls_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] #[doc = "## Safety"] @@ -8143,7 +7855,7 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8153,7 +7865,6 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { } _vclsq_s16(a) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] #[doc = "## Safety"] @@ -8176,7 +7887,7 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8188,7 +7899,6 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vclsq_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] #[doc = "## Safety"] @@ -8211,7 +7921,7 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8221,7 +7931,6 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { } _vcls_s32(a) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] #[doc = "## Safety"] @@ -8244,7 +7953,7 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8256,7 +7965,6 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcls_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] #[doc = "## Safety"] @@ -8279,7 +7987,7 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8289,7 +7997,6 @@ pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { } _vclsq_s32(a) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] #[doc = "## Safety"] @@ -8312,7 +8019,7 @@ pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -8324,7 +8031,6 @@ pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vclsq_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] #[doc = "## Safety"] @@ -8349,7 +8055,6 @@ pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { vcls_s8(transmute(a)) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] #[doc = "## Safety"] @@ -8376,7 +8081,6 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = vcls_s8(transmute(a)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] #[doc = "## Safety"] @@ -8401,7 +8105,6 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { vclsq_s8(transmute(a)) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] #[doc = "## Safety"] @@ -8432,7 +8135,6 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] #[doc = "## Safety"] @@ -8457,7 +8159,6 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { vcls_s16(transmute(a)) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] #[doc = "## Safety"] @@ -8484,7 +8185,6 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { let ret_val: int16x4_t = vcls_s16(transmute(a)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] #[doc = "## Safety"] @@ -8509,7 +8209,6 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { vclsq_s16(transmute(a)) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] #[doc = "## Safety"] @@ -8536,7 +8235,6 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { let ret_val: int16x8_t = vclsq_s16(transmute(a)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] #[doc = "## Safety"] @@ -8561,7 +8259,6 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { vcls_s32(transmute(a)) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] #[doc = "## Safety"] @@ -8588,7 +8285,6 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { let ret_val: int32x2_t = vcls_s32(transmute(a)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] #[doc = "## Safety"] @@ -8613,7 +8309,6 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { vclsq_s32(transmute(a)) } - #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] #[doc = "## Safety"] @@ -8640,7 +8335,6 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { let ret_val: int32x4_t = vclsq_s32(transmute(a)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] #[doc = "## Safety"] @@ -8665,7 +8359,6 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_lt(a, b) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] #[doc = "## Safety"] @@ -8693,7 +8386,6 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] #[doc = "## Safety"] @@ -8718,7 +8410,6 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_lt(a, b) } - #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] #[doc = "## Safety"] @@ -8746,7 +8437,6 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] #[doc = "## Safety"] @@ -8771,7 +8461,6 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] #[doc = "## Safety"] @@ -8799,7 +8488,6 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] #[doc = "## Safety"] @@ -8824,7 +8512,6 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] #[doc = "## Safety"] @@ -8856,7 +8543,6 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] #[doc = "## Safety"] @@ -8881,7 +8567,6 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] #[doc = "## Safety"] @@ -8909,7 +8594,6 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] #[doc = "## Safety"] @@ -8934,7 +8618,6 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] #[doc = "## Safety"] @@ -8962,7 +8645,6 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] #[doc = "## Safety"] @@ -8987,7 +8669,6 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] #[doc = "## Safety"] @@ -9015,7 +8696,6 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] #[doc = "## Safety"] @@ -9040,7 +8720,6 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_lt(a, b) } - #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] #[doc = "## Safety"] @@ -9068,7 +8747,6 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] #[doc = "## Safety"] @@ -9093,7 +8771,6 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] #[doc = "## Safety"] @@ -9121,7 +8798,6 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] #[doc = "## Safety"] @@ -9146,7 +8822,6 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] #[doc = "## Safety"] @@ -9178,7 +8853,6 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] #[doc = "## Safety"] @@ -9203,7 +8877,6 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] #[doc = "## Safety"] @@ -9231,7 +8904,6 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] #[doc = "## Safety"] @@ -9256,7 +8928,6 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] #[doc = "## Safety"] @@ -9284,7 +8955,6 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] #[doc = "## Safety"] @@ -9309,7 +8979,6 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] #[doc = "## Safety"] @@ -9337,7 +9006,6 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] #[doc = "## Safety"] @@ -9362,7 +9030,6 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_lt(a, b) } - #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] #[doc = "## Safety"] @@ -9390,7 +9057,6 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_lt(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] #[doc = "## Safety"] @@ -9413,7 +9079,7 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9423,7 +9089,6 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { } _vclz_s8(a) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] #[doc = "## Safety"] @@ -9446,7 +9111,7 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9458,7 +9123,6 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vclz_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] #[doc = "## Safety"] @@ -9481,7 +9145,7 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9491,7 +9155,6 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { } _vclzq_s8(a) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] #[doc = "## Safety"] @@ -9514,7 +9177,7 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9530,7 +9193,6 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] #[doc = "## Safety"] @@ -9553,7 +9215,7 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9563,7 +9225,6 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { } _vclz_s16(a) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] #[doc = "## Safety"] @@ -9586,7 +9247,7 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9598,7 +9259,6 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vclz_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] #[doc = "## Safety"] @@ -9621,7 +9281,7 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9631,7 +9291,6 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { } _vclzq_s16(a) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] #[doc = "## Safety"] @@ -9654,7 +9313,7 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9666,7 +9325,6 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vclzq_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] #[doc = "## Safety"] @@ -9689,7 +9347,7 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9699,7 +9357,6 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { } _vclz_s32(a) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] #[doc = "## Safety"] @@ -9722,7 +9379,7 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9734,7 +9391,6 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vclz_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] #[doc = "## Safety"] @@ -9757,7 +9413,7 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9767,7 +9423,6 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { } _vclzq_s32(a) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] #[doc = "## Safety"] @@ -9790,7 +9445,7 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -9802,7 +9457,6 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vclzq_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[doc = "## Safety"] @@ -9827,7 +9481,6 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { transmute(vclz_s16(transmute(a))) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[doc = "## Safety"] @@ -9854,7 +9507,6 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(vclz_s16(transmute(a))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[doc = "## Safety"] @@ -9879,7 +9531,6 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { transmute(vclzq_s16(transmute(a))) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[doc = "## Safety"] @@ -9906,7 +9557,6 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(vclzq_s16(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[doc = "## Safety"] @@ -9931,7 +9581,6 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { transmute(vclz_s32(transmute(a))) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[doc = "## Safety"] @@ -9958,7 +9607,6 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(vclz_s32(transmute(a))); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[doc = "## Safety"] @@ -9983,7 +9631,6 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { transmute(vclzq_s32(transmute(a))) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[doc = "## Safety"] @@ -10010,7 +9657,6 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(vclzq_s32(transmute(a))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[doc = "## Safety"] @@ -10035,7 +9681,6 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { transmute(vclz_s8(transmute(a))) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[doc = "## Safety"] @@ -10062,7 +9707,6 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vclz_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[doc = "## Safety"] @@ -10087,7 +9731,6 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vclzq_s8(transmute(a))) } - #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[doc = "## Safety"] @@ -10118,7 +9761,6 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] #[doc = "## Safety"] @@ -10141,7 +9783,7 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ctpop.v8i8" @@ -10151,7 +9793,6 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { } _vcnt_s8(a) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] #[doc = "## Safety"] @@ -10174,7 +9815,7 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ctpop.v8i8" @@ -10186,7 +9827,6 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vcnt_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] #[doc = "## Safety"] @@ -10209,7 +9849,7 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ctpop.v16i8" @@ -10219,7 +9859,6 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { } _vcntq_s8(a) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] #[doc = "## Safety"] @@ -10242,7 +9881,7 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ctpop.v16i8" @@ -10258,7 +9897,6 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[doc = "## Safety"] @@ -10283,7 +9921,6 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { transmute(vcnt_s8(transmute(a))) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[doc = "## Safety"] @@ -10310,7 +9947,6 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vcnt_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[doc = "## Safety"] @@ -10335,7 +9971,6 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vcntq_s8(transmute(a))) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[doc = "## Safety"] @@ -10366,7 +10001,6 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[doc = "## Safety"] @@ -10391,7 +10025,6 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { transmute(vcnt_s8(transmute(a))) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[doc = "## Safety"] @@ -10418,7 +10051,6 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vcnt_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[doc = "## Safety"] @@ -10443,7 +10075,6 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { transmute(vcntq_s8(transmute(a))) } - #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[doc = "## Safety"] @@ -10474,7 +10105,6 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] #[doc = "## Safety"] @@ -10495,7 +10125,6 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { simd_shuffle!(a, b, [0, 1, 2, 3]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] #[doc = "## Safety"] @@ -10519,7 +10148,6 @@ pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] #[doc = "## Safety"] @@ -10540,7 +10168,6 @@ pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] #[doc = "## Safety"] @@ -10569,7 +10196,6 @@ pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] #[doc = "## Safety"] @@ -10590,7 +10216,6 @@ pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] #[doc = "## Safety"] @@ -10614,7 +10239,6 @@ pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] #[doc = "## Safety"] @@ -10635,7 +10259,6 @@ pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { simd_shuffle!(a, b, [0, 1, 2, 3]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] #[doc = "## Safety"] @@ -10659,7 +10282,6 @@ pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] #[doc = "## Safety"] @@ -10680,7 +10302,6 @@ pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { simd_shuffle!(a, b, [0, 1]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] #[doc = "## Safety"] @@ -10702,7 +10323,6 @@ pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 1]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] #[doc = "## Safety"] @@ -10723,7 +10343,6 @@ pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] #[doc = "## Safety"] @@ -10752,7 +10371,6 @@ pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] #[doc = "## Safety"] @@ -10773,7 +10391,6 @@ pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] #[doc = "## Safety"] @@ -10797,7 +10414,6 @@ pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] #[doc = "## Safety"] @@ -10818,7 +10434,6 @@ pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { simd_shuffle!(a, b, [0, 1, 2, 3]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] #[doc = "## Safety"] @@ -10842,7 +10457,6 @@ pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] #[doc = "## Safety"] @@ -10863,7 +10477,6 @@ pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { simd_shuffle!(a, b, [0, 1]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] #[doc = "## Safety"] @@ -10885,7 +10498,6 @@ pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 1]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] #[doc = "## Safety"] @@ -10906,7 +10518,6 @@ pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] #[doc = "## Safety"] @@ -10935,7 +10546,6 @@ pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] #[doc = "## Safety"] @@ -10956,7 +10566,6 @@ pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] #[doc = "## Safety"] @@ -10980,7 +10589,6 @@ pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] #[doc = "## Safety"] @@ -11001,7 +10609,6 @@ pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { simd_shuffle!(a, b, [0, 1]) } - #[doc = "Vector combine"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] #[doc = "## Safety"] @@ -11023,7 +10630,6 @@ pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 1]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[doc = "## Safety"] @@ -11048,7 +10654,6 @@ pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[doc = "## Safety"] @@ -11074,7 +10679,6 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[doc = "## Safety"] @@ -11099,7 +10703,6 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[doc = "## Safety"] @@ -11125,7 +10728,6 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[doc = "## Safety"] @@ -11150,7 +10752,6 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[doc = "## Safety"] @@ -11176,7 +10777,6 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[doc = "## Safety"] @@ -11201,7 +10801,6 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[doc = "## Safety"] @@ -11227,7 +10826,6 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] #[doc = "## Safety"] @@ -11251,7 +10849,6 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[doc = "## Safety"] @@ -11276,7 +10873,6 @@ pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[doc = "## Safety"] @@ -11302,7 +10898,6 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[doc = "## Safety"] @@ -11327,7 +10922,6 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[doc = "## Safety"] @@ -11353,7 +10947,6 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[doc = "## Safety"] @@ -11378,7 +10971,6 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[doc = "## Safety"] @@ -11404,7 +10996,6 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] #[doc = "## Safety"] @@ -11428,7 +11019,6 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[doc = "## Safety"] @@ -11453,7 +11043,6 @@ pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[doc = "## Safety"] @@ -11479,7 +11068,6 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[doc = "## Safety"] @@ -11504,7 +11092,6 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { transmute(a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[doc = "## Safety"] @@ -11530,7 +11117,6 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] #[doc = "## Safety"] @@ -11554,7 +11140,6 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { transmute(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] #[doc = "## Safety"] @@ -11579,7 +11164,6 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] #[doc = "## Safety"] @@ -11606,7 +11190,6 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] #[doc = "## Safety"] @@ -11631,7 +11214,6 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] #[doc = "## Safety"] @@ -11658,7 +11240,6 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] #[doc = "## Safety"] @@ -11683,7 +11264,6 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] #[doc = "## Safety"] @@ -11710,7 +11290,6 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] #[doc = "## Safety"] @@ -11735,7 +11314,6 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { simd_cast(a) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] #[doc = "## Safety"] @@ -11762,7 +11340,6 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_cast(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] @@ -11776,7 +11353,7 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" @@ -11785,7 +11362,6 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { } _vcvt_n_f32_s32(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] @@ -11799,7 +11375,7 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" @@ -11810,7 +11386,6 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vcvt_n_f32_s32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] @@ -11824,7 +11399,7 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" @@ -11833,7 +11408,6 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { } _vcvtq_n_f32_s32(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] @@ -11847,7 +11421,7 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" @@ -11858,7 +11432,6 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vcvtq_n_f32_s32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] @@ -11872,7 +11445,7 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" @@ -11881,7 +11454,6 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { } _vcvt_n_f32_s32(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] @@ -11895,7 +11467,7 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" @@ -11906,7 +11478,6 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vcvt_n_f32_s32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] @@ -11920,7 +11491,7 @@ pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" @@ -11929,7 +11500,6 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { } _vcvtq_n_f32_s32(a, N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] @@ -11943,7 +11513,7 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" @@ -11954,7 +11524,6 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vcvtq_n_f32_s32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] @@ -11968,7 +11537,7 @@ pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" @@ -11977,7 +11546,6 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { } _vcvt_n_f32_u32(a.as_signed(), N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] @@ -11991,7 +11559,7 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" @@ -12002,7 +11570,6 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vcvt_n_f32_u32(a.as_signed(), N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] @@ -12016,7 +11583,7 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" @@ -12025,7 +11592,6 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { } _vcvtq_n_f32_u32(a.as_signed(), N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] @@ -12039,7 +11605,7 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" @@ -12050,7 +11616,6 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vcvtq_n_f32_u32(a.as_signed(), N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] @@ -12064,7 +11629,7 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" @@ -12073,7 +11638,6 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { } _vcvt_n_f32_u32(a.as_signed(), N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] @@ -12087,7 +11651,7 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" @@ -12098,7 +11662,6 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vcvt_n_f32_u32(a.as_signed(), N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] @@ -12112,7 +11675,7 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" @@ -12121,7 +11684,6 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { } _vcvtq_n_f32_u32(a.as_signed(), N) } - #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] @@ -12135,7 +11697,7 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" @@ -12146,7 +11708,6 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vcvtq_n_f32_u32(a.as_signed(), N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[doc = "## Safety"] @@ -12160,7 +11721,7 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" @@ -12169,7 +11730,6 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { } _vcvt_n_s32_f32(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[doc = "## Safety"] @@ -12183,7 +11743,7 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" @@ -12194,7 +11754,6 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcvt_n_s32_f32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] #[doc = "## Safety"] @@ -12208,7 +11767,7 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" @@ -12217,7 +11776,6 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { } _vcvtq_n_s32_f32(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] #[doc = "## Safety"] @@ -12231,7 +11789,7 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" @@ -12242,7 +11800,6 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vcvtq_n_s32_f32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[doc = "## Safety"] @@ -12256,7 +11813,7 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" @@ -12265,7 +11822,6 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { } _vcvt_n_s32_f32(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] #[doc = "## Safety"] @@ -12279,7 +11835,7 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" @@ -12290,7 +11846,6 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcvt_n_s32_f32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] #[doc = "## Safety"] @@ -12304,7 +11859,7 @@ pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" @@ -12313,7 +11868,6 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { } _vcvtq_n_s32_f32(a, N) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] #[doc = "## Safety"] @@ -12327,7 +11881,7 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" @@ -12338,7 +11892,6 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vcvtq_n_s32_f32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] #[doc = "## Safety"] @@ -12352,7 +11905,7 @@ pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" @@ -12361,7 +11914,6 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { } _vcvt_n_u32_f32(a, N).as_unsigned() } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] #[doc = "## Safety"] @@ -12375,7 +11927,7 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" @@ -12386,7 +11938,6 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcvt_n_u32_f32(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[doc = "## Safety"] @@ -12400,7 +11951,7 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" @@ -12409,7 +11960,6 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { } _vcvtq_n_u32_f32(a, N).as_unsigned() } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[doc = "## Safety"] @@ -12423,7 +11973,7 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" @@ -12434,7 +11984,6 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcvtq_n_u32_f32(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] #[doc = "## Safety"] @@ -12448,7 +11997,7 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" @@ -12457,7 +12006,6 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { } _vcvt_n_u32_f32(a, N).as_unsigned() } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] #[doc = "## Safety"] @@ -12471,7 +12019,7 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" @@ -12482,7 +12030,6 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcvt_n_u32_f32(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[doc = "## Safety"] @@ -12496,7 +12043,7 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" @@ -12505,7 +12052,6 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { } _vcvtq_n_u32_f32(a, N).as_unsigned() } - #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] #[doc = "## Safety"] @@ -12519,7 +12065,7 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" @@ -12530,7 +12076,6 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcvtq_n_u32_f32(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] #[doc = "## Safety"] @@ -12553,7 +12098,7 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12563,7 +12108,6 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { } _vcvt_s32_f32(a) } - #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] #[doc = "## Safety"] @@ -12586,7 +12130,7 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12598,7 +12142,6 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vcvt_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] #[doc = "## Safety"] @@ -12621,7 +12164,7 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12631,7 +12174,6 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { } _vcvtq_s32_f32(a) } - #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] #[doc = "## Safety"] @@ -12654,7 +12196,7 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12666,7 +12208,6 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vcvtq_s32_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] #[doc = "## Safety"] @@ -12689,7 +12230,7 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12699,7 +12240,6 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { } _vcvt_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] #[doc = "## Safety"] @@ -12722,7 +12262,7 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12734,7 +12274,6 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vcvt_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] #[doc = "## Safety"] @@ -12757,7 +12296,7 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12767,7 +12306,6 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { } _vcvtq_u32_f32(a).as_unsigned() } - #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] #[doc = "## Safety"] @@ -12790,7 +12328,7 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12802,7 +12340,6 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vcvtq_u32_f32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] #[doc = "## Safety"] @@ -12831,7 +12368,6 @@ pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vdot_s32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] #[doc = "## Safety"] @@ -12864,7 +12400,6 @@ pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] #[doc = "## Safety"] @@ -12897,7 +12432,6 @@ pub unsafe fn vdotq_lane_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vdotq_s32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] #[doc = "## Safety"] @@ -12934,7 +12468,6 @@ pub unsafe fn vdotq_lane_s32( let ret_val: int32x4_t = vdotq_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] #[doc = "## Safety"] @@ -12967,7 +12500,6 @@ pub unsafe fn vdot_lane_u32( let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vdot_u32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] #[doc = "## Safety"] @@ -13004,7 +12536,6 @@ pub unsafe fn vdot_lane_u32( let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] #[doc = "## Safety"] @@ -13037,7 +12568,6 @@ pub unsafe fn vdotq_lane_u32( let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vdotq_u32(a, b, transmute(c)) } - #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] #[doc = "## Safety"] @@ -13074,7 +12604,6 @@ pub unsafe fn vdotq_lane_u32( let ret_val: uint32x4_t = vdotq_u32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] #[doc = "## Safety"] @@ -13097,7 +12626,7 @@ pub unsafe fn vdotq_lane_u32( unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13107,7 +12636,6 @@ pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { } _vdot_s32(a, b, c) } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] #[doc = "## Safety"] @@ -13130,7 +12658,7 @@ pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13144,7 +12672,6 @@ pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { let ret_val: int32x2_t = _vdot_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] #[doc = "## Safety"] @@ -13167,7 +12694,7 @@ pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13177,7 +12704,6 @@ pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { } _vdotq_s32(a, b, c) } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] #[doc = "## Safety"] @@ -13200,7 +12726,7 @@ pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13214,7 +12740,6 @@ pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { let ret_val: int32x4_t = _vdotq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] #[doc = "## Safety"] @@ -13237,7 +12762,7 @@ pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13247,7 +12772,6 @@ pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t } _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] #[doc = "## Safety"] @@ -13270,7 +12794,7 @@ pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13284,7 +12808,6 @@ pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t let ret_val: uint32x2_t = _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] #[doc = "## Safety"] @@ -13307,7 +12830,7 @@ pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13317,7 +12840,6 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4 } _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] #[doc = "## Safety"] @@ -13340,7 +12862,7 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13354,7 +12876,6 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4 let ret_val: uint32x4_t = _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[doc = "## Safety"] @@ -13381,7 +12902,6 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[doc = "## Safety"] @@ -13410,7 +12930,6 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[doc = "## Safety"] @@ -13437,7 +12956,6 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[doc = "## Safety"] @@ -13466,7 +12984,6 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] #[doc = "## Safety"] @@ -13493,7 +13010,6 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] #[doc = "## Safety"] @@ -13522,7 +13038,6 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] #[doc = "## Safety"] @@ -13549,7 +13064,6 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] #[doc = "## Safety"] @@ -13578,7 +13092,6 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] #[doc = "## Safety"] @@ -13605,7 +13118,6 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] #[doc = "## Safety"] @@ -13634,7 +13146,6 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] #[doc = "## Safety"] @@ -13661,7 +13172,6 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] #[doc = "## Safety"] @@ -13690,7 +13200,6 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] #[doc = "## Safety"] @@ -13717,7 +13226,6 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] #[doc = "## Safety"] @@ -13746,7 +13254,6 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] #[doc = "## Safety"] @@ -13773,7 +13280,6 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] #[doc = "## Safety"] @@ -13802,7 +13308,6 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] #[doc = "## Safety"] @@ -13829,7 +13334,6 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] #[doc = "## Safety"] @@ -13858,7 +13362,6 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] #[doc = "## Safety"] @@ -13889,7 +13392,6 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] #[doc = "## Safety"] @@ -13922,7 +13424,6 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] #[doc = "## Safety"] @@ -13953,7 +13454,6 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] #[doc = "## Safety"] @@ -13986,7 +13486,6 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] #[doc = "## Safety"] @@ -14017,7 +13516,6 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] #[doc = "## Safety"] @@ -14050,7 +13548,6 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] #[doc = "## Safety"] @@ -14081,7 +13578,6 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] #[doc = "## Safety"] @@ -14114,7 +13610,6 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] #[doc = "## Safety"] @@ -14145,7 +13640,6 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] #[doc = "## Safety"] @@ -14178,7 +13672,6 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] #[doc = "## Safety"] @@ -14209,7 +13702,6 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] #[doc = "## Safety"] @@ -14242,7 +13734,6 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] #[doc = "## Safety"] @@ -14276,7 +13767,6 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { ] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] #[doc = "## Safety"] @@ -14316,7 +13806,6 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] #[doc = "## Safety"] @@ -14350,7 +13839,6 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { ] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] #[doc = "## Safety"] @@ -14390,7 +13878,6 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] #[doc = "## Safety"] @@ -14424,7 +13911,6 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { ] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] #[doc = "## Safety"] @@ -14464,7 +13950,6 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] #[doc = "## Safety"] @@ -14490,7 +13975,6 @@ pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { static_assert!(N == 0); a } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] #[doc = "## Safety"] @@ -14516,7 +14000,6 @@ pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N == 0); a } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] #[doc = "## Safety"] @@ -14543,7 +14026,6 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] #[doc = "## Safety"] @@ -14572,7 +14054,6 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] #[doc = "## Safety"] @@ -14599,7 +14080,6 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] #[doc = "## Safety"] @@ -14628,7 +14108,6 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[doc = "## Safety"] @@ -14655,7 +14134,6 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[doc = "## Safety"] @@ -14684,7 +14162,6 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[doc = "## Safety"] @@ -14711,7 +14188,6 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[doc = "## Safety"] @@ -14740,7 +14216,6 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[doc = "## Safety"] @@ -14767,7 +14242,6 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[doc = "## Safety"] @@ -14796,7 +14270,6 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[doc = "## Safety"] @@ -14823,7 +14296,6 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[doc = "## Safety"] @@ -14852,7 +14324,6 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[doc = "## Safety"] @@ -14879,7 +14350,6 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[doc = "## Safety"] @@ -14908,7 +14378,6 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[doc = "## Safety"] @@ -14935,7 +14404,6 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[doc = "## Safety"] @@ -14964,7 +14432,6 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[doc = "## Safety"] @@ -14991,7 +14458,6 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[doc = "## Safety"] @@ -15020,7 +14486,6 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[doc = "## Safety"] @@ -15051,7 +14516,6 @@ pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[doc = "## Safety"] @@ -15084,7 +14548,6 @@ pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[doc = "## Safety"] @@ -15115,7 +14578,6 @@ pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[doc = "## Safety"] @@ -15148,7 +14610,6 @@ pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[doc = "## Safety"] @@ -15179,7 +14640,6 @@ pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[doc = "## Safety"] @@ -15212,7 +14672,6 @@ pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[doc = "## Safety"] @@ -15243,7 +14702,6 @@ pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[doc = "## Safety"] @@ -15276,7 +14734,6 @@ pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[doc = "## Safety"] @@ -15307,7 +14764,6 @@ pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[doc = "## Safety"] @@ -15340,7 +14796,6 @@ pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[doc = "## Safety"] @@ -15371,7 +14826,6 @@ pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[doc = "## Safety"] @@ -15404,7 +14858,6 @@ pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[doc = "## Safety"] @@ -15438,7 +14891,6 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { ] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[doc = "## Safety"] @@ -15478,7 +14930,6 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[doc = "## Safety"] @@ -15512,7 +14963,6 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { ] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[doc = "## Safety"] @@ -15552,7 +15002,6 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[doc = "## Safety"] @@ -15586,7 +15035,6 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { ] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[doc = "## Safety"] @@ -15626,7 +15074,6 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] #[doc = "## Safety"] @@ -15653,7 +15100,6 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { static_assert_uimm_bits!(N, 1); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] #[doc = "## Safety"] @@ -15681,7 +15127,6 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[doc = "## Safety"] @@ -15708,7 +15153,6 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { static_assert_uimm_bits!(N, 1); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[doc = "## Safety"] @@ -15736,7 +15180,6 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); transmute::(simd_extract!(a, N as u32)) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] #[doc = "## Safety"] @@ -15763,7 +15206,6 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] #[doc = "## Safety"] @@ -15791,7 +15233,6 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] #[doc = "## Safety"] @@ -15818,7 +15259,6 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] #[doc = "## Safety"] @@ -15846,7 +15286,6 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] #[doc = "## Safety"] @@ -15873,7 +15312,6 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] #[doc = "## Safety"] @@ -15902,7 +15340,6 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] #[doc = "## Safety"] @@ -15929,7 +15366,6 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } - #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] #[doc = "## Safety"] @@ -15958,7 +15394,6 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] #[doc = "## Safety"] @@ -15983,7 +15418,6 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] #[doc = "## Safety"] @@ -16011,7 +15445,6 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] #[doc = "## Safety"] @@ -16036,7 +15469,6 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] #[doc = "## Safety"] @@ -16068,7 +15500,6 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] #[doc = "## Safety"] @@ -16093,7 +15524,6 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] #[doc = "## Safety"] @@ -16121,7 +15551,6 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] #[doc = "## Safety"] @@ -16146,7 +15575,6 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] #[doc = "## Safety"] @@ -16174,7 +15602,6 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] #[doc = "## Safety"] @@ -16199,7 +15626,6 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] #[doc = "## Safety"] @@ -16227,7 +15653,6 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] #[doc = "## Safety"] @@ -16252,7 +15677,6 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] #[doc = "## Safety"] @@ -16280,7 +15704,6 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] #[doc = "## Safety"] @@ -16304,7 +15727,6 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] #[doc = "## Safety"] @@ -16329,7 +15751,6 @@ pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] #[doc = "## Safety"] @@ -16357,7 +15778,6 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] #[doc = "## Safety"] @@ -16382,7 +15802,6 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] #[doc = "## Safety"] @@ -16410,7 +15829,6 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] #[doc = "## Safety"] @@ -16435,7 +15853,6 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] #[doc = "## Safety"] @@ -16467,7 +15884,6 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] #[doc = "## Safety"] @@ -16492,7 +15908,6 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] #[doc = "## Safety"] @@ -16520,7 +15935,6 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] #[doc = "## Safety"] @@ -16545,7 +15959,6 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] #[doc = "## Safety"] @@ -16573,7 +15986,6 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] #[doc = "## Safety"] @@ -16598,7 +16010,6 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] #[doc = "## Safety"] @@ -16626,7 +16037,6 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] #[doc = "## Safety"] @@ -16651,7 +16061,6 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] #[doc = "## Safety"] @@ -16679,7 +16088,6 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] #[doc = "## Safety"] @@ -16703,7 +16111,6 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] #[doc = "## Safety"] @@ -16728,7 +16135,6 @@ pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_xor(a, b) } - #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] #[doc = "## Safety"] @@ -16756,7 +16162,6 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_xor(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] #[doc = "## Safety"] @@ -16787,7 +16192,6 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] #[doc = "## Safety"] @@ -16821,7 +16225,6 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] #[doc = "## Safety"] @@ -16852,7 +16255,6 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] #[doc = "## Safety"] @@ -16886,7 +16288,6 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] #[doc = "## Safety"] @@ -16917,7 +16318,6 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] #[doc = "## Safety"] @@ -16951,7 +16351,6 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] #[doc = "## Safety"] @@ -16988,7 +16387,6 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] #[doc = "## Safety"] @@ -17028,7 +16426,6 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] #[doc = "## Safety"] @@ -17065,7 +16462,6 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] #[doc = "## Safety"] @@ -17105,7 +16501,6 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] #[doc = "## Safety"] @@ -17142,7 +16537,6 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] #[doc = "## Safety"] @@ -17182,7 +16576,6 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] #[doc = "## Safety"] @@ -17219,7 +16612,6 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] #[doc = "## Safety"] @@ -17259,7 +16651,6 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] #[doc = "## Safety"] @@ -17296,7 +16687,6 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] #[doc = "## Safety"] @@ -17336,7 +16726,6 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] #[doc = "## Safety"] @@ -17373,7 +16762,6 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] #[doc = "## Safety"] @@ -17413,7 +16801,6 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] #[doc = "## Safety"] @@ -17446,7 +16833,6 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] #[doc = "## Safety"] @@ -17482,7 +16868,6 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] #[doc = "## Safety"] @@ -17515,7 +16900,6 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] #[doc = "## Safety"] @@ -17551,7 +16935,6 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] #[doc = "## Safety"] @@ -17584,7 +16967,6 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] #[doc = "## Safety"] @@ -17620,7 +17002,6 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] #[doc = "## Safety"] @@ -17653,7 +17034,6 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] #[doc = "## Safety"] @@ -17689,7 +17069,6 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] #[doc = "## Safety"] @@ -17722,7 +17101,6 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] #[doc = "## Safety"] @@ -17758,7 +17136,6 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] #[doc = "## Safety"] @@ -17791,7 +17168,6 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] #[doc = "## Safety"] @@ -17827,7 +17203,6 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t }; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] #[doc = "## Safety"] @@ -17858,7 +17233,6 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] #[doc = "## Safety"] @@ -17892,7 +17266,6 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] #[doc = "## Safety"] @@ -17923,7 +17296,6 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] #[doc = "## Safety"] @@ -17957,7 +17329,6 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ }; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] #[doc = "## Safety"] @@ -18062,7 +17433,6 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] #[doc = "## Safety"] @@ -18174,7 +17544,6 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] #[doc = "## Safety"] @@ -18279,7 +17648,6 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] #[doc = "## Safety"] @@ -18391,7 +17759,6 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] #[doc = "## Safety"] @@ -18496,7 +17863,6 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t _ => unreachable_unchecked(), } } - #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] #[doc = "## Safety"] @@ -18608,7 +17974,6 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] #[doc = "## Safety"] @@ -18631,14 +17996,13 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; } _vfma_f32(b, c, a) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] #[doc = "## Safety"] @@ -18661,7 +18025,7 @@ pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; @@ -18672,7 +18036,6 @@ pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 let ret_val: float32x2_t = _vfma_f32(b, c, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] #[doc = "## Safety"] @@ -18695,14 +18058,13 @@ pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; } _vfmaq_f32(b, c, a) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] #[doc = "## Safety"] @@ -18725,7 +18087,7 @@ pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; @@ -18736,7 +18098,6 @@ pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float let ret_val: float32x4_t = _vfmaq_f32(b, c, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] #[doc = "## Safety"] @@ -18761,7 +18122,6 @@ pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfma_f32(a, b, vdup_n_f32_vfp4(c)) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] #[doc = "## Safety"] @@ -18789,7 +18149,6 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32_vfp4(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] #[doc = "## Safety"] @@ -18814,7 +18173,6 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) } - #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] #[doc = "## Safety"] @@ -18842,7 +18200,6 @@ pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] #[doc = "## Safety"] @@ -18868,7 +18225,6 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 let b: float32x2_t = simd_neg(b); vfma_f32(a, b, c) } - #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] #[doc = "## Safety"] @@ -18898,7 +18254,6 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 let ret_val: float32x2_t = vfma_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] #[doc = "## Safety"] @@ -18924,7 +18279,6 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float let b: float32x4_t = simd_neg(b); vfmaq_f32(a, b, c) } - #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] #[doc = "## Safety"] @@ -18954,7 +18308,6 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float let ret_val: float32x4_t = vfmaq_f32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] #[doc = "## Safety"] @@ -18979,7 +18332,6 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfms_f32(a, b, vdup_n_f32_vfp4(c)) } - #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] #[doc = "## Safety"] @@ -19007,7 +18359,6 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32_vfp4(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] #[doc = "## Safety"] @@ -19032,7 +18383,6 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } - #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] #[doc = "## Safety"] @@ -19060,7 +18410,6 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] #[doc = "## Safety"] @@ -19083,7 +18432,7 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v8i8" @@ -19093,7 +18442,6 @@ pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vhadd_s8(a, b) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] #[doc = "## Safety"] @@ -19116,7 +18464,7 @@ pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v8i8" @@ -19129,7 +18477,6 @@ pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vhadd_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] #[doc = "## Safety"] @@ -19152,7 +18499,7 @@ pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v16i8" @@ -19162,7 +18509,6 @@ pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vhaddq_s8(a, b) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] #[doc = "## Safety"] @@ -19185,7 +18531,7 @@ pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v16i8" @@ -19202,7 +18548,6 @@ pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] #[doc = "## Safety"] @@ -19225,7 +18570,7 @@ pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v4i16" @@ -19235,7 +18580,6 @@ pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vhadd_s16(a, b) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] #[doc = "## Safety"] @@ -19258,7 +18602,7 @@ pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v4i16" @@ -19271,7 +18615,6 @@ pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vhadd_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] #[doc = "## Safety"] @@ -19294,7 +18637,7 @@ pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v8i16" @@ -19304,7 +18647,6 @@ pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vhaddq_s16(a, b) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] #[doc = "## Safety"] @@ -19327,7 +18669,7 @@ pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v8i16" @@ -19340,7 +18682,6 @@ pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vhaddq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] #[doc = "## Safety"] @@ -19363,7 +18704,7 @@ pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v2i32" @@ -19373,7 +18714,6 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vhadd_s32(a, b) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] #[doc = "## Safety"] @@ -19396,7 +18736,7 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v2i32" @@ -19409,7 +18749,6 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vhadd_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] #[doc = "## Safety"] @@ -19432,7 +18771,7 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v4i32" @@ -19442,7 +18781,6 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vhaddq_s32(a, b) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] #[doc = "## Safety"] @@ -19465,7 +18803,7 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shadd.v4i32" @@ -19478,7 +18816,6 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vhaddq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] #[doc = "## Safety"] @@ -19501,7 +18838,7 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v8i8" @@ -19511,7 +18848,6 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] #[doc = "## Safety"] @@ -19534,7 +18870,7 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v8i8" @@ -19547,7 +18883,6 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] #[doc = "## Safety"] @@ -19570,7 +18905,7 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v16i8" @@ -19580,7 +18915,6 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] #[doc = "## Safety"] @@ -19603,7 +18937,7 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v16i8" @@ -19620,7 +18954,6 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] #[doc = "## Safety"] @@ -19643,7 +18976,7 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v4i16" @@ -19653,7 +18986,6 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] #[doc = "## Safety"] @@ -19676,7 +19008,7 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v4i16" @@ -19689,7 +19021,6 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] #[doc = "## Safety"] @@ -19712,7 +19043,7 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v8i16" @@ -19722,7 +19053,6 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] #[doc = "## Safety"] @@ -19745,7 +19075,7 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v8i16" @@ -19758,7 +19088,6 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] #[doc = "## Safety"] @@ -19781,7 +19110,7 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v2i32" @@ -19791,7 +19120,6 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] #[doc = "## Safety"] @@ -19814,7 +19142,7 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v2i32" @@ -19827,7 +19155,6 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] #[doc = "## Safety"] @@ -19850,7 +19177,7 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v4i32" @@ -19860,7 +19187,6 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] #[doc = "## Safety"] @@ -19883,7 +19209,7 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhadd.v4i32" @@ -19896,7 +19222,6 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] #[doc = "## Safety"] @@ -19919,7 +19244,7 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v4i16" @@ -19929,7 +19254,6 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vhsub_s16(a, b) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] #[doc = "## Safety"] @@ -19952,7 +19276,7 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v4i16" @@ -19965,7 +19289,6 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vhsub_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] #[doc = "## Safety"] @@ -19988,7 +19311,7 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v8i16" @@ -19998,7 +19321,6 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vhsubq_s16(a, b) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] #[doc = "## Safety"] @@ -20021,7 +19343,7 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v8i16" @@ -20034,7 +19356,6 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vhsubq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] #[doc = "## Safety"] @@ -20057,7 +19378,7 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v2i32" @@ -20067,7 +19388,6 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vhsub_s32(a, b) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] #[doc = "## Safety"] @@ -20090,7 +19410,7 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v2i32" @@ -20103,7 +19423,6 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vhsub_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] #[doc = "## Safety"] @@ -20126,7 +19445,7 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v4i32" @@ -20136,7 +19455,6 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vhsubq_s32(a, b) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] #[doc = "## Safety"] @@ -20159,7 +19477,7 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v4i32" @@ -20172,7 +19490,6 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vhsubq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] #[doc = "## Safety"] @@ -20195,7 +19512,7 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v8i8" @@ -20205,7 +19522,6 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vhsub_s8(a, b) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] #[doc = "## Safety"] @@ -20228,7 +19544,7 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v8i8" @@ -20241,7 +19557,6 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vhsub_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] #[doc = "## Safety"] @@ -20264,7 +19579,7 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v16i8" @@ -20274,7 +19589,6 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vhsubq_s8(a, b) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] #[doc = "## Safety"] @@ -20297,7 +19611,7 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.shsub.v16i8" @@ -20314,7 +19628,6 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] #[doc = "## Safety"] @@ -20337,7 +19650,7 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v8i8" @@ -20347,7 +19660,6 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] #[doc = "## Safety"] @@ -20370,7 +19682,7 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v8i8" @@ -20383,7 +19695,6 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] #[doc = "## Safety"] @@ -20406,7 +19717,7 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v16i8" @@ -20416,7 +19727,6 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] #[doc = "## Safety"] @@ -20439,7 +19749,7 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v16i8" @@ -20456,7 +19766,6 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] #[doc = "## Safety"] @@ -20479,7 +19788,7 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v4i16" @@ -20489,7 +19798,6 @@ pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] #[doc = "## Safety"] @@ -20512,7 +19820,7 @@ pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v4i16" @@ -20525,7 +19833,6 @@ pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] #[doc = "## Safety"] @@ -20548,7 +19855,7 @@ pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v8i16" @@ -20558,7 +19865,6 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] #[doc = "## Safety"] @@ -20581,7 +19887,7 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v8i16" @@ -20594,7 +19900,6 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] #[doc = "## Safety"] @@ -20617,7 +19922,7 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v2i32" @@ -20627,7 +19932,6 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] #[doc = "## Safety"] @@ -20650,7 +19954,7 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v2i32" @@ -20663,7 +19967,6 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] #[doc = "## Safety"] @@ -20686,7 +19989,7 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v4i32" @@ -20696,7 +19999,6 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] #[doc = "## Safety"] @@ -20719,7 +20021,7 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uhsub.v4i32" @@ -20732,7 +20034,6 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] @@ -20749,7 +20050,6 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] @@ -20767,7 +20067,6 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { )); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] @@ -20784,7 +20083,6 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] @@ -20802,7 +20100,6 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { )); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] @@ -20819,7 +20116,6 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] @@ -20837,7 +20133,6 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] @@ -20854,7 +20149,6 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] @@ -20876,7 +20170,6 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] @@ -20893,7 +20186,6 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] @@ -20911,7 +20203,6 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { )); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] @@ -20928,7 +20219,6 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] @@ -20946,7 +20236,6 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] @@ -20963,7 +20252,6 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] @@ -20981,7 +20269,6 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { )); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] @@ -20998,7 +20285,6 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] @@ -21016,7 +20302,6 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { )); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] #[doc = "## Safety"] @@ -21032,7 +20317,6 @@ pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] @@ -21049,7 +20333,6 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] @@ -21067,7 +20350,6 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { )); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] @@ -21084,7 +20366,6 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] @@ -21102,7 +20383,6 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] #[doc = "## Safety"] @@ -21119,7 +20399,6 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] #[doc = "## Safety"] @@ -21141,7 +20420,6 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] @@ -21158,7 +20436,6 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] @@ -21176,7 +20453,6 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { )); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] @@ -21193,7 +20469,6 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] @@ -21211,7 +20486,6 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] #[doc = "## Safety"] @@ -21228,7 +20502,6 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { crate::mem::align_of::() as i32, )) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] #[doc = "## Safety"] @@ -21246,7 +20519,6 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { )); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] #[doc = "## Safety"] @@ -21269,7 +20541,7 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0" @@ -21279,7 +20551,6 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { } _vld1_f32_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] #[doc = "## Safety"] @@ -21302,7 +20573,7 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0" @@ -21315,7 +20586,6 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] #[doc = "## Safety"] @@ -21338,7 +20608,7 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0" @@ -21348,7 +20618,6 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { } _vld1_f32_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] #[doc = "## Safety"] @@ -21371,7 +20640,7 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0" @@ -21385,7 +20654,6 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] #[doc = "## Safety"] @@ -21408,7 +20676,7 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0" @@ -21418,7 +20686,6 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { } _vld1_f32_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] #[doc = "## Safety"] @@ -21441,7 +20708,7 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0" @@ -21456,7 +20723,6 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] #[doc = "## Safety"] @@ -21479,7 +20745,7 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0" @@ -21489,7 +20755,6 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { } _vld1q_f32_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] #[doc = "## Safety"] @@ -21512,7 +20777,7 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0" @@ -21525,7 +20790,6 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] #[doc = "## Safety"] @@ -21548,7 +20812,7 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0" @@ -21558,7 +20822,6 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { } _vld1q_f32_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] #[doc = "## Safety"] @@ -21581,7 +20844,7 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0" @@ -21595,7 +20858,6 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] #[doc = "## Safety"] @@ -21618,7 +20880,7 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0" @@ -21628,7 +20890,6 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { } _vld1q_f32_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] #[doc = "## Safety"] @@ -21651,7 +20912,7 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0" @@ -21666,7 +20927,6 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] #[doc = "## Safety"] @@ -21679,13 +20939,12 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { let a: *const i8 = ptr as *const i8; let b: i32 = crate::mem::align_of::() as i32; - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; } transmute(_vld1_v1i64(a, b)) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] #[doc = "## Safety"] @@ -21709,7 +20968,6 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"] #[doc = "## Safety"] @@ -21733,7 +20991,6 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"] #[doc = "## Safety"] @@ -21757,7 +21014,6 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] #[doc = "## Safety"] @@ -21782,7 +21038,6 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] #[doc = "## Safety"] @@ -21810,7 +21065,6 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] #[doc = "## Safety"] @@ -21835,7 +21089,6 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { transmute(vld1q_s64_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] #[doc = "## Safety"] @@ -21864,7 +21117,6 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] #[doc = "## Safety"] @@ -21889,7 +21141,6 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] #[doc = "## Safety"] @@ -21919,7 +21170,6 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] @@ -21933,7 +21183,6 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { vld1_v8i8(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] @@ -21948,7 +21197,6 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { let ret_val: int8x8_t = vld1_v8i8(ptr as *const i8, crate::mem::align_of::() as i32); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] @@ -21962,7 +21210,6 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { vld1q_v16i8(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] @@ -21981,7 +21228,6 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] @@ -21995,7 +21241,6 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { vld1_v4i16(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] @@ -22010,7 +21255,6 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { let ret_val: int16x4_t = vld1_v4i16(ptr as *const i8, crate::mem::align_of::() as i32); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] @@ -22024,7 +21268,6 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { vld1q_v8i16(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] @@ -22039,7 +21282,6 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { let ret_val: int16x8_t = vld1q_v8i16(ptr as *const i8, crate::mem::align_of::() as i32); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] @@ -22053,7 +21295,6 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] @@ -22068,7 +21309,6 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { let ret_val: int32x2_t = vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] @@ -22082,7 +21322,6 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { vld1q_v4i32(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] @@ -22097,7 +21336,6 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { let ret_val: int32x4_t = vld1q_v4i32(ptr as *const i8, crate::mem::align_of::() as i32); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] #[doc = "## Safety"] @@ -22110,7 +21348,6 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { vld1_v1i64(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] @@ -22124,7 +21361,6 @@ pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { vld1q_v2i64(ptr as *const i8, crate::mem::align_of::() as i32) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] @@ -22139,7 +21375,6 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { let ret_val: int64x2_t = vld1q_v2i64(ptr as *const i8, crate::mem::align_of::() as i32); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] #[doc = "## Safety"] @@ -22162,7 +21397,7 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0" @@ -22172,7 +21407,6 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { } _vld1_s8_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] #[doc = "## Safety"] @@ -22195,7 +21429,7 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0" @@ -22208,7 +21442,6 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] #[doc = "## Safety"] @@ -22231,7 +21464,7 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0" @@ -22241,7 +21474,6 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { } _vld1_s8_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] #[doc = "## Safety"] @@ -22264,7 +21496,7 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0" @@ -22278,7 +21510,6 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] #[doc = "## Safety"] @@ -22301,7 +21532,7 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0" @@ -22311,7 +21542,6 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { } _vld1_s8_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] #[doc = "## Safety"] @@ -22334,7 +21564,7 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0" @@ -22349,7 +21579,6 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] #[doc = "## Safety"] @@ -22372,7 +21601,7 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0" @@ -22382,7 +21611,6 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { } _vld1q_s8_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] #[doc = "## Safety"] @@ -22405,7 +21633,7 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0" @@ -22426,7 +21654,6 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] #[doc = "## Safety"] @@ -22449,7 +21676,7 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0" @@ -22459,7 +21686,6 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { } _vld1q_s8_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] #[doc = "## Safety"] @@ -22482,7 +21708,7 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0" @@ -22508,7 +21734,6 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] #[doc = "## Safety"] @@ -22531,7 +21756,7 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0" @@ -22541,7 +21766,6 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { } _vld1q_s8_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] #[doc = "## Safety"] @@ -22564,7 +21788,7 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0" @@ -22595,7 +21819,6 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] #[doc = "## Safety"] @@ -22618,7 +21841,7 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0" @@ -22628,7 +21851,6 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { } _vld1_s16_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] #[doc = "## Safety"] @@ -22651,7 +21873,7 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0" @@ -22664,7 +21886,6 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] #[doc = "## Safety"] @@ -22687,7 +21908,7 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0" @@ -22697,7 +21918,6 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { } _vld1_s16_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] #[doc = "## Safety"] @@ -22720,7 +21940,7 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0" @@ -22734,7 +21954,6 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] #[doc = "## Safety"] @@ -22757,7 +21976,7 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0" @@ -22767,7 +21986,6 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { } _vld1_s16_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] #[doc = "## Safety"] @@ -22790,7 +22008,7 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0" @@ -22805,7 +22023,6 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] #[doc = "## Safety"] @@ -22828,7 +22045,7 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0" @@ -22838,7 +22055,6 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { } _vld1q_s16_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] #[doc = "## Safety"] @@ -22861,7 +22077,7 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0" @@ -22874,7 +22090,6 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] #[doc = "## Safety"] @@ -22897,7 +22112,7 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0" @@ -22907,7 +22122,6 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { } _vld1q_s16_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] #[doc = "## Safety"] @@ -22930,7 +22144,7 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0" @@ -22944,7 +22158,6 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] #[doc = "## Safety"] @@ -22967,7 +22180,7 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0" @@ -22977,7 +22190,6 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { } _vld1q_s16_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] #[doc = "## Safety"] @@ -23000,7 +22212,7 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0" @@ -23015,7 +22227,6 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] #[doc = "## Safety"] @@ -23038,7 +22249,7 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0" @@ -23048,7 +22259,6 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { } _vld1_s32_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] #[doc = "## Safety"] @@ -23071,7 +22281,7 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0" @@ -23084,7 +22294,6 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] #[doc = "## Safety"] @@ -23107,7 +22316,7 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0" @@ -23117,7 +22326,6 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { } _vld1_s32_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] #[doc = "## Safety"] @@ -23140,7 +22348,7 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0" @@ -23154,7 +22362,6 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] #[doc = "## Safety"] @@ -23177,7 +22384,7 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0" @@ -23187,7 +22394,6 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { } _vld1_s32_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] #[doc = "## Safety"] @@ -23210,7 +22416,7 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0" @@ -23225,7 +22431,6 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] #[doc = "## Safety"] @@ -23248,7 +22453,7 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0" @@ -23258,7 +22463,6 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { } _vld1q_s32_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] #[doc = "## Safety"] @@ -23281,7 +22485,7 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0" @@ -23294,7 +22498,6 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] #[doc = "## Safety"] @@ -23317,7 +22520,7 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0" @@ -23327,7 +22530,6 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { } _vld1q_s32_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] #[doc = "## Safety"] @@ -23350,7 +22552,7 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0" @@ -23364,7 +22566,6 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] #[doc = "## Safety"] @@ -23387,7 +22588,7 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0" @@ -23397,7 +22598,6 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { } _vld1q_s32_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] #[doc = "## Safety"] @@ -23420,7 +22620,7 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0" @@ -23435,7 +22635,6 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] #[doc = "## Safety"] @@ -23457,7 +22656,7 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0" @@ -23467,7 +22666,6 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { } _vld1_s64_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] #[doc = "## Safety"] @@ -23489,7 +22687,7 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0" @@ -23499,7 +22697,6 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { } _vld1_s64_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] #[doc = "## Safety"] @@ -23521,7 +22718,7 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0" @@ -23531,7 +22728,6 @@ pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { } _vld1_s64_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] #[doc = "## Safety"] @@ -23554,7 +22750,7 @@ pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0" @@ -23564,7 +22760,6 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { } _vld1q_s64_x2(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] #[doc = "## Safety"] @@ -23587,7 +22782,7 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0" @@ -23600,7 +22795,6 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] #[doc = "## Safety"] @@ -23623,7 +22817,7 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0" @@ -23633,7 +22827,6 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { } _vld1q_s64_x3(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] #[doc = "## Safety"] @@ -23656,7 +22849,7 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0" @@ -23670,7 +22863,6 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] #[doc = "## Safety"] @@ -23693,7 +22885,7 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0" @@ -23703,7 +22895,6 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { } _vld1q_s64_x4(a) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] #[doc = "## Safety"] @@ -23726,7 +22917,7 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0" @@ -23741,7 +22932,6 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] @@ -23766,7 +22956,6 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] @@ -23794,7 +22983,6 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] @@ -23819,7 +23007,6 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] @@ -23848,7 +23035,6 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] @@ -23873,7 +23059,6 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] @@ -23903,7 +23088,6 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] @@ -23928,7 +23112,6 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] @@ -23964,7 +23147,6 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] @@ -23989,7 +23171,6 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] @@ -24030,7 +23211,6 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] @@ -24055,7 +23235,6 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] @@ -24101,7 +23280,6 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] @@ -24126,7 +23304,6 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] @@ -24154,7 +23331,6 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] @@ -24179,7 +23355,6 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] @@ -24208,7 +23383,6 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] @@ -24233,7 +23407,6 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] @@ -24263,7 +23436,6 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] @@ -24288,7 +23460,6 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] @@ -24316,7 +23487,6 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] @@ -24341,7 +23511,6 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] @@ -24370,7 +23539,6 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## Safety"] @@ -24395,7 +23563,6 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## Safety"] @@ -24425,7 +23592,6 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] @@ -24450,7 +23616,6 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { transmute(vld1_s32_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] @@ -24478,7 +23643,6 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] @@ -24503,7 +23667,6 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { transmute(vld1_s32_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] @@ -24532,7 +23695,6 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] @@ -24557,7 +23719,6 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { transmute(vld1_s32_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] @@ -24587,7 +23748,6 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] @@ -24612,7 +23772,6 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { transmute(vld1q_s32_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] @@ -24640,7 +23799,6 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] @@ -24665,7 +23823,6 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { transmute(vld1q_s32_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] @@ -24694,7 +23851,6 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] @@ -24719,7 +23875,6 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { transmute(vld1q_s32_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] @@ -24749,7 +23904,6 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] #[doc = "## Safety"] @@ -24773,7 +23927,6 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] #[doc = "## Safety"] @@ -24797,7 +23950,6 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] #[doc = "## Safety"] @@ -24821,7 +23973,6 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] @@ -24846,7 +23997,6 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] @@ -24874,7 +24024,6 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] @@ -24899,7 +24048,6 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { transmute(vld1q_s64_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] @@ -24928,7 +24076,6 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] @@ -24953,7 +24100,6 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] @@ -24983,7 +24129,6 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] @@ -25008,7 +24153,6 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] @@ -25036,7 +24180,6 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] @@ -25061,7 +24204,6 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] @@ -25090,7 +24232,6 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] @@ -25115,7 +24256,6 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] @@ -25145,7 +24285,6 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] @@ -25170,7 +24309,6 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] @@ -25206,7 +24344,6 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] @@ -25231,7 +24368,6 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] @@ -25272,7 +24408,6 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] @@ -25297,7 +24432,6 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] @@ -25343,7 +24477,6 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { ); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] @@ -25368,7 +24501,6 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] @@ -25396,7 +24528,6 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] #[doc = "## Safety"] @@ -25421,7 +24552,6 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] #[doc = "## Safety"] @@ -25450,7 +24580,6 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] #[doc = "## Safety"] @@ -25475,7 +24604,6 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] #[doc = "## Safety"] @@ -25505,7 +24633,6 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] #[doc = "## Safety"] @@ -25530,7 +24657,6 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] #[doc = "## Safety"] @@ -25558,7 +24684,6 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] #[doc = "## Safety"] @@ -25583,7 +24708,6 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] #[doc = "## Safety"] @@ -25612,7 +24736,6 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] #[doc = "## Safety"] @@ -25637,7 +24760,6 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] #[doc = "## Safety"] @@ -25667,7 +24789,6 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v1i64)"] #[doc = "## Safety"] @@ -25677,13 +24798,12 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v1i64(a: *const i8, b: i32) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; } _vld1_v1i64(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2f32)"] #[doc = "## Safety"] @@ -25694,13 +24814,12 @@ unsafe fn vld1_v1i64(a: *const i8, b: i32) -> int64x1_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; } _vld1_v2f32(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2f32)"] #[doc = "## Safety"] @@ -25711,14 +24830,13 @@ unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; } let ret_val: float32x2_t = _vld1_v2f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2i32)"] #[doc = "## Safety"] @@ -25729,13 +24847,12 @@ unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; } _vld1_v2i32(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2i32)"] #[doc = "## Safety"] @@ -25746,14 +24863,13 @@ unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; } let ret_val: int32x2_t = _vld1_v2i32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v4i16)"] #[doc = "## Safety"] @@ -25764,13 +24880,12 @@ unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; } _vld1_v4i16(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v4i16)"] #[doc = "## Safety"] @@ -25781,14 +24896,13 @@ unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; } let ret_val: int16x4_t = _vld1_v4i16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v8i8)"] #[doc = "## Safety"] @@ -25799,13 +24913,12 @@ unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; } _vld1_v8i8(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v8i8)"] #[doc = "## Safety"] @@ -25816,14 +24929,13 @@ unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; } let ret_val: int8x8_t = _vld1_v8i8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v16i8)"] #[doc = "## Safety"] @@ -25834,13 +24946,12 @@ unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; } _vld1q_v16i8(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v16i8)"] #[doc = "## Safety"] @@ -25851,7 +24962,7 @@ unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; } @@ -25862,7 +24973,6 @@ unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v2i64)"] #[doc = "## Safety"] @@ -25873,13 +24983,12 @@ unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; } _vld1q_v2i64(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v2i64)"] #[doc = "## Safety"] @@ -25890,14 +24999,13 @@ unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; } let ret_val: int64x2_t = _vld1q_v2i64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4f32)"] #[doc = "## Safety"] @@ -25908,13 +25016,12 @@ unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; } _vld1q_v4f32(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4f32)"] #[doc = "## Safety"] @@ -25925,14 +25032,13 @@ unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; } let ret_val: float32x4_t = _vld1q_v4f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4i32)"] #[doc = "## Safety"] @@ -25943,13 +25049,12 @@ unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; } _vld1q_v4i32(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4i32)"] #[doc = "## Safety"] @@ -25960,14 +25065,13 @@ unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; } let ret_val: int32x4_t = _vld1q_v4i32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v8i16)"] #[doc = "## Safety"] @@ -25978,13 +25082,12 @@ unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; } _vld1q_v8i16(a, b) } - #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v8i16)"] #[doc = "## Safety"] @@ -25995,14 +25098,13 @@ unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; } let ret_val: int16x8_t = _vld1q_v8i16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] @@ -26014,13 +25116,12 @@ unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } _vld2_dup_f32(a as *const i8, 4) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] @@ -26032,7 +25133,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } @@ -26041,7 +25142,6 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] @@ -26053,13 +25153,12 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } _vld2q_dup_f32(a as *const i8, 4) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] @@ -26071,7 +25170,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } @@ -26080,7 +25179,6 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] @@ -26092,13 +25190,12 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } _vld2_dup_s8(a as *const i8, 1) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] @@ -26110,7 +25207,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } @@ -26119,7 +25216,6 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] @@ -26131,13 +25227,12 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } _vld2q_dup_s8(a as *const i8, 1) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] @@ -26149,7 +25244,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } @@ -26166,7 +25261,6 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { ); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] @@ -26178,13 +25272,12 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } _vld2_dup_s16(a as *const i8, 2) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] @@ -26196,7 +25289,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } @@ -26205,7 +25298,6 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] @@ -26217,13 +25309,12 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } _vld2q_dup_s16(a as *const i8, 2) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] @@ -26235,7 +25326,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } @@ -26244,7 +25335,6 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] #[doc = "## Safety"] @@ -26256,13 +25346,12 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } _vld2_dup_s32(a as *const i8, 4) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] #[doc = "## Safety"] @@ -26274,7 +25363,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } @@ -26283,7 +25372,6 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] #[doc = "## Safety"] @@ -26295,13 +25383,12 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } _vld2q_dup_s32(a as *const i8, 4) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] #[doc = "## Safety"] @@ -26313,7 +25400,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } @@ -26322,7 +25409,6 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] @@ -26334,7 +25420,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" @@ -26343,7 +25429,6 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { } _vld2_dup_f32(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] @@ -26355,7 +25440,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" @@ -26367,7 +25452,6 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] @@ -26379,7 +25463,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" @@ -26388,7 +25472,6 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { } _vld2q_dup_f32(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] @@ -26400,7 +25483,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" @@ -26412,7 +25495,6 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] @@ -26424,7 +25506,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" @@ -26433,7 +25515,6 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { } _vld2_dup_s8(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] @@ -26445,7 +25526,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" @@ -26457,7 +25538,6 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] @@ -26469,7 +25549,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" @@ -26478,7 +25558,6 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { } _vld2q_dup_s8(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] @@ -26490,7 +25569,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" @@ -26510,7 +25589,6 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { ); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] @@ -26522,7 +25600,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" @@ -26531,7 +25609,6 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { } _vld2_dup_s16(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] @@ -26543,7 +25620,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" @@ -26555,7 +25632,6 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] @@ -26567,7 +25643,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" @@ -26576,7 +25652,6 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { } _vld2q_dup_s16(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] @@ -26588,7 +25663,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" @@ -26600,7 +25675,6 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] #[doc = "## Safety"] @@ -26612,7 +25686,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" @@ -26621,7 +25695,6 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { } _vld2_dup_s32(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] #[doc = "## Safety"] @@ -26633,7 +25706,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" @@ -26645,7 +25718,6 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] #[doc = "## Safety"] @@ -26657,7 +25729,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" @@ -26666,7 +25738,6 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { } _vld2q_dup_s32(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] #[doc = "## Safety"] @@ -26678,7 +25749,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" @@ -26690,7 +25761,6 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)"] #[doc = "## Safety"] @@ -26714,7 +25784,6 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { transmute(vld2_dup_s64(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] #[doc = "## Safety"] @@ -26725,13 +25794,12 @@ pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0")] fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } _vld2_dup_s64(a as *const i8, 8) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] #[doc = "## Safety"] @@ -26742,7 +25810,7 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v1i64.p0" @@ -26751,7 +25819,6 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { } _vld2_dup_s64(a as _) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] #[doc = "## Safety"] @@ -26775,7 +25842,6 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { transmute(vld2_dup_s64(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] @@ -26800,7 +25866,6 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_dup_s8(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] @@ -26828,7 +25893,6 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] @@ -26853,7 +25917,6 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { transmute(vld2q_dup_s8(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] @@ -26889,7 +25952,6 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { ); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] @@ -26914,7 +25976,6 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { transmute(vld2_dup_s16(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] @@ -26942,7 +26003,6 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] @@ -26967,7 +26027,6 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { transmute(vld2q_dup_s16(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] @@ -26995,7 +26054,6 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] @@ -27020,7 +26078,6 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { transmute(vld2_dup_s32(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] @@ -27048,7 +26105,6 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] @@ -27073,7 +26129,6 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { transmute(vld2q_dup_s32(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] @@ -27101,7 +26156,6 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] @@ -27126,7 +26180,6 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { transmute(vld2_dup_s8(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] @@ -27154,7 +26207,6 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] @@ -27179,7 +26231,6 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { transmute(vld2q_dup_s8(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] @@ -27215,7 +26266,6 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { ); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] @@ -27240,7 +26290,6 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { transmute(vld2_dup_s16(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] @@ -27268,7 +26317,6 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] @@ -27293,7 +26341,6 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { transmute(vld2q_dup_s16(transmute(a))) } - #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] @@ -27321,7 +26368,6 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] @@ -27333,13 +26379,12 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } _vld2_f32(a as *const i8, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] @@ -27351,7 +26396,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } @@ -27360,7 +26405,6 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] @@ -27372,13 +26416,12 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } _vld2q_f32(a as *const i8, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] @@ -27390,7 +26433,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } @@ -27399,7 +26442,6 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] @@ -27411,13 +26453,12 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } _vld2_s8(a as *const i8, 1) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] @@ -27429,7 +26470,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } @@ -27438,7 +26479,6 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] @@ -27450,13 +26490,12 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } _vld2q_s8(a as *const i8, 1) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] @@ -27468,7 +26507,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } @@ -27485,7 +26524,6 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { ); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] @@ -27497,13 +26535,12 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } _vld2_s16(a as *const i8, 2) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] @@ -27515,7 +26552,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } @@ -27524,7 +26561,6 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] @@ -27536,13 +26572,12 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } _vld2q_s16(a as *const i8, 2) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] @@ -27554,7 +26589,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } @@ -27563,7 +26598,6 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] @@ -27575,13 +26609,12 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } _vld2_s32(a as *const i8, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] @@ -27593,7 +26626,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } @@ -27602,7 +26635,6 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] @@ -27614,13 +26646,12 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } _vld2q_s32(a as *const i8, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] @@ -27632,7 +26663,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } @@ -27641,7 +26672,6 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] @@ -27653,7 +26683,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2f32.p0" @@ -27662,7 +26692,6 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { } _vld2_f32(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] @@ -27674,7 +26703,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2f32.p0" @@ -27686,7 +26715,6 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] @@ -27698,7 +26726,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4f32.p0" @@ -27707,7 +26735,6 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { } _vld2q_f32(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] @@ -27719,7 +26746,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4f32.p0" @@ -27731,7 +26758,6 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] @@ -27743,7 +26769,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v8i8.p0" @@ -27752,7 +26778,6 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { } _vld2_s8(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] @@ -27764,7 +26789,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v8i8.p0" @@ -27776,7 +26801,6 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] @@ -27788,7 +26812,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v16i8.p0" @@ -27797,7 +26821,6 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { } _vld2q_s8(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] @@ -27809,7 +26832,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v16i8.p0" @@ -27829,7 +26852,6 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { ); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] @@ -27841,7 +26863,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4i16.p0" @@ -27850,7 +26872,6 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { } _vld2_s16(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] @@ -27862,7 +26883,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4i16.p0" @@ -27874,7 +26895,6 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] @@ -27886,7 +26906,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v8i16.p0" @@ -27895,7 +26915,6 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { } _vld2q_s16(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] @@ -27907,7 +26926,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v8i16.p0" @@ -27919,7 +26938,6 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] @@ -27931,7 +26949,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2i32.p0" @@ -27940,7 +26958,6 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { } _vld2_s32(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] @@ -27952,7 +26969,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2i32.p0" @@ -27964,7 +26981,6 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] @@ -27976,7 +26992,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4i32.p0" @@ -27985,7 +27001,6 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { } _vld2q_s32(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] @@ -27997,7 +27012,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v4i32.p0" @@ -28009,7 +27024,6 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] @@ -28023,7 +27037,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0" @@ -28032,7 +27046,6 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> } _vld2_lane_f32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] @@ -28046,7 +27059,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0" @@ -28061,7 +27074,6 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] @@ -28075,7 +27087,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" @@ -28085,7 +27097,6 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - } _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] @@ -28099,7 +27110,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" @@ -28115,7 +27126,6 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] @@ -28129,7 +27139,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" @@ -28138,7 +27148,6 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 } _vld2_lane_s8(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] @@ -28152,7 +27161,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" @@ -28167,7 +27176,6 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] @@ -28181,7 +27189,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0" @@ -28190,7 +27198,6 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i } _vld2_lane_s16(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] @@ -28204,7 +27211,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0" @@ -28219,7 +27226,6 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] @@ -28233,7 +27239,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0" @@ -28242,7 +27248,6 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> } _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] @@ -28256,7 +27261,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0" @@ -28271,7 +27276,6 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] @@ -28285,7 +27289,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" @@ -28294,7 +27298,6 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i } _vld2_lane_s32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] @@ -28308,7 +27311,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" @@ -28323,7 +27326,6 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] @@ -28337,7 +27339,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" @@ -28346,7 +27348,6 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> } _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] @@ -28360,7 +27361,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" @@ -28375,7 +27376,6 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] @@ -28389,7 +27389,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] fn _vld2_lane_f32( ptr: *const i8, @@ -28401,7 +27401,6 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> } _vld2_lane_f32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] @@ -28415,7 +27414,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] fn _vld2_lane_f32( ptr: *const i8, @@ -28433,7 +27432,6 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] @@ -28447,7 +27445,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] fn _vld2q_lane_f32( ptr: *const i8, @@ -28459,7 +27457,6 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - } _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] @@ -28473,7 +27470,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] fn _vld2q_lane_f32( ptr: *const i8, @@ -28491,7 +27488,6 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] @@ -28505,7 +27501,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] fn _vld2q_lane_s16( ptr: *const i8, @@ -28517,7 +27513,6 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> } _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] @@ -28531,7 +27526,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] fn _vld2q_lane_s16( ptr: *const i8, @@ -28549,7 +27544,6 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] @@ -28563,7 +27557,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] fn _vld2q_lane_s32( ptr: *const i8, @@ -28575,7 +27569,6 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> } _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] @@ -28589,7 +27582,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] fn _vld2q_lane_s32( ptr: *const i8, @@ -28607,7 +27600,6 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] @@ -28621,14 +27613,13 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) -> int8x8x2_t; } _vld2_lane_s8(a as _, b.0, b.1, LANE, 1) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] @@ -28642,7 +27633,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) -> int8x8x2_t; @@ -28655,7 +27646,6 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] @@ -28669,7 +27659,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] fn _vld2_lane_s16( ptr: *const i8, @@ -28681,7 +27671,6 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i } _vld2_lane_s16(a as _, b.0, b.1, LANE, 2) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] @@ -28695,7 +27684,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] fn _vld2_lane_s16( ptr: *const i8, @@ -28713,7 +27702,6 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] @@ -28727,7 +27715,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] fn _vld2_lane_s32( ptr: *const i8, @@ -28739,7 +27727,6 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i } _vld2_lane_s32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] @@ -28753,7 +27740,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] fn _vld2_lane_s32( ptr: *const i8, @@ -28771,7 +27758,6 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] #[doc = "## Safety"] @@ -28798,7 +27784,6 @@ pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uin static_assert_uimm_bits!(LANE, 3); transmute(vld2_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] #[doc = "## Safety"] @@ -28831,7 +27816,6 @@ pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uin ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] #[doc = "## Safety"] @@ -28858,7 +27842,6 @@ pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld2_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] #[doc = "## Safety"] @@ -28891,7 +27874,6 @@ pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] #[doc = "## Safety"] @@ -28918,7 +27900,6 @@ pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> static_assert_uimm_bits!(LANE, 3); transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] #[doc = "## Safety"] @@ -28951,7 +27932,6 @@ pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] #[doc = "## Safety"] @@ -28978,7 +27958,6 @@ pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld2_lane_s32::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] #[doc = "## Safety"] @@ -29011,7 +27990,6 @@ pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] #[doc = "## Safety"] @@ -29038,7 +28016,6 @@ pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld2q_lane_s32::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] #[doc = "## Safety"] @@ -29071,7 +28048,6 @@ pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] #[doc = "## Safety"] @@ -29098,7 +28074,6 @@ pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> pol static_assert_uimm_bits!(LANE, 3); transmute(vld2_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] #[doc = "## Safety"] @@ -29131,7 +28106,6 @@ pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> pol ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] #[doc = "## Safety"] @@ -29158,7 +28132,6 @@ pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld2_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] #[doc = "## Safety"] @@ -29191,7 +28164,6 @@ pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] #[doc = "## Safety"] @@ -29218,7 +28190,6 @@ pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> static_assert_uimm_bits!(LANE, 3); transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] #[doc = "## Safety"] @@ -29251,7 +28222,6 @@ pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] #[doc = "## Safety"] @@ -29275,7 +28245,6 @@ pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { transmute(vld2_s64(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] @@ -29286,13 +28255,12 @@ pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64")] fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } _vld2_s64(a as *const i8, 8) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] @@ -29303,7 +28271,7 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v1i64.p0" @@ -29312,7 +28280,6 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { } _vld2_s64(a as _) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] #[doc = "## Safety"] @@ -29336,7 +28303,6 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { transmute(vld2_s64(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## Safety"] @@ -29361,7 +28327,6 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_s8(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## Safety"] @@ -29389,7 +28354,6 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] @@ -29414,7 +28378,6 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { transmute(vld2q_s8(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] @@ -29450,7 +28413,6 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { ); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] @@ -29475,7 +28437,6 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { transmute(vld2_s16(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] @@ -29503,7 +28464,6 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] @@ -29528,7 +28488,6 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { transmute(vld2q_s16(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] @@ -29556,7 +28515,6 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] @@ -29581,7 +28539,6 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { transmute(vld2_s32(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] @@ -29609,7 +28566,6 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] @@ -29634,7 +28590,6 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { transmute(vld2q_s32(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] @@ -29662,7 +28617,6 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] @@ -29687,7 +28641,6 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { transmute(vld2_s8(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] @@ -29715,7 +28668,6 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] @@ -29740,7 +28692,6 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { transmute(vld2q_s8(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] @@ -29776,7 +28727,6 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { ); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] @@ -29801,7 +28751,6 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { transmute(vld2_s16(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] @@ -29829,7 +28778,6 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] @@ -29854,7 +28802,6 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { transmute(vld2q_s16(transmute(a))) } - #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] @@ -29882,7 +28829,6 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] @@ -29894,7 +28840,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" @@ -29903,7 +28849,6 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { } _vld3_dup_f32(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] @@ -29915,7 +28860,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" @@ -29928,7 +28873,6 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] @@ -29940,7 +28884,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" @@ -29949,7 +28893,6 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { } _vld3q_dup_f32(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] @@ -29961,7 +28904,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" @@ -29974,7 +28917,6 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] @@ -29986,7 +28928,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" @@ -29995,7 +28937,6 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { } _vld3_dup_s8(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] @@ -30007,7 +28948,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" @@ -30020,7 +28961,6 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] @@ -30032,7 +28972,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" @@ -30041,7 +28981,6 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { } _vld3q_dup_s8(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] @@ -30053,7 +28992,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" @@ -30078,7 +29017,6 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { ); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] @@ -30090,7 +29028,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" @@ -30099,7 +29037,6 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { } _vld3_dup_s16(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] @@ -30111,7 +29048,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" @@ -30124,7 +29061,6 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] #[doc = "## Safety"] @@ -30136,7 +29072,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" @@ -30145,7 +29081,6 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { } _vld3q_dup_s16(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] #[doc = "## Safety"] @@ -30157,7 +29092,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" @@ -30170,7 +29105,6 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] #[doc = "## Safety"] @@ -30182,7 +29116,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" @@ -30191,7 +29125,6 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { } _vld3_dup_s32(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] #[doc = "## Safety"] @@ -30203,7 +29136,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" @@ -30216,7 +29149,6 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] #[doc = "## Safety"] @@ -30228,7 +29160,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" @@ -30237,7 +29169,6 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { } _vld3q_dup_s32(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] #[doc = "## Safety"] @@ -30249,7 +29180,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" @@ -30262,7 +29193,6 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] #[doc = "## Safety"] @@ -30273,7 +29203,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v1i64.p0" @@ -30282,7 +29212,6 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { } _vld3_dup_s64(a as _) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] @@ -30294,13 +29223,12 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } _vld3_dup_f32(a as *const i8, 4) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] @@ -30312,7 +29240,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } @@ -30322,7 +29250,6 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] @@ -30334,13 +29261,12 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } _vld3q_dup_f32(a as *const i8, 4) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] @@ -30352,7 +29278,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } @@ -30362,7 +29288,6 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] @@ -30374,13 +29299,12 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } _vld3_dup_s8(a as *const i8, 1) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] @@ -30392,7 +29316,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } @@ -30402,7 +29326,6 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] @@ -30414,13 +29337,12 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } _vld3q_dup_s8(a as *const i8, 1) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] @@ -30432,7 +29354,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } @@ -30454,7 +29376,6 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { ); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] @@ -30466,13 +29387,12 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } _vld3_dup_s16(a as *const i8, 2) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] @@ -30484,7 +29404,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } @@ -30494,7 +29414,6 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] #[doc = "## Safety"] @@ -30506,13 +29425,12 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } _vld3q_dup_s16(a as *const i8, 2) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] #[doc = "## Safety"] @@ -30524,7 +29442,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } @@ -30534,7 +29452,6 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] #[doc = "## Safety"] @@ -30546,13 +29463,12 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } _vld3_dup_s32(a as *const i8, 4) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] #[doc = "## Safety"] @@ -30564,7 +29480,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } @@ -30574,7 +29490,6 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] #[doc = "## Safety"] @@ -30586,13 +29501,12 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } _vld3q_dup_s32(a as *const i8, 4) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] #[doc = "## Safety"] @@ -30604,7 +29518,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } @@ -30614,7 +29528,6 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"] #[doc = "## Safety"] @@ -30638,7 +29551,6 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { transmute(vld3_dup_s64(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] #[doc = "## Safety"] @@ -30649,13 +29561,12 @@ pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0")] fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } _vld3_dup_s64(a as *const i8, 8) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"] #[doc = "## Safety"] @@ -30679,7 +29590,6 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_dup_s64(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] @@ -30704,7 +29614,6 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] @@ -30733,7 +29642,6 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] @@ -30758,7 +29666,6 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] @@ -30799,7 +29706,6 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { ); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] @@ -30824,7 +29730,6 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_dup_s16(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] @@ -30853,7 +29758,6 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] @@ -30878,7 +29782,6 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_dup_s16(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] @@ -30907,7 +29810,6 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] @@ -30932,7 +29834,6 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_dup_s32(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] @@ -30961,7 +29862,6 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] @@ -30986,7 +29886,6 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_dup_s32(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] @@ -31015,7 +29914,6 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] @@ -31040,7 +29938,6 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] @@ -31069,7 +29966,6 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] @@ -31094,7 +29990,6 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] @@ -31135,7 +30030,6 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { ); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] @@ -31160,7 +30054,6 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_dup_s16(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] @@ -31189,7 +30082,6 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] @@ -31214,7 +30106,6 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_dup_s16(transmute(a))) } - #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] @@ -31243,7 +30134,6 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] @@ -31255,7 +30145,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2f32.p0" @@ -31264,7 +30154,6 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { } _vld3_f32(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] @@ -31276,7 +30165,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2f32.p0" @@ -31289,7 +30178,6 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] @@ -31301,7 +30189,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4f32.p0" @@ -31310,7 +30198,6 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { } _vld3q_f32(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] @@ -31322,7 +30209,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4f32.p0" @@ -31335,7 +30222,6 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] @@ -31347,7 +30233,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v8i8.p0" @@ -31356,7 +30242,6 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { } _vld3_s8(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] @@ -31368,7 +30253,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v8i8.p0" @@ -31381,7 +30266,6 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] @@ -31393,7 +30277,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v16i8.p0" @@ -31402,7 +30286,6 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { } _vld3q_s8(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] @@ -31414,7 +30297,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v16i8.p0" @@ -31439,7 +30322,6 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { ); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] @@ -31451,7 +30333,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4i16.p0" @@ -31460,7 +30342,6 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { } _vld3_s16(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] @@ -31472,7 +30353,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4i16.p0" @@ -31485,7 +30366,6 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] @@ -31497,7 +30377,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v8i16.p0" @@ -31506,7 +30386,6 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { } _vld3q_s16(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] @@ -31518,7 +30397,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v8i16.p0" @@ -31531,7 +30410,6 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] @@ -31543,7 +30421,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2i32.p0" @@ -31552,7 +30430,6 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { } _vld3_s32(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] @@ -31564,7 +30441,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2i32.p0" @@ -31577,7 +30454,6 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] @@ -31589,7 +30465,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4i32.p0" @@ -31598,7 +30474,6 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { } _vld3q_s32(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] @@ -31610,7 +30485,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v4i32.p0" @@ -31623,7 +30498,6 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] @@ -31635,13 +30509,12 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } _vld3_f32(a as *const i8, 4) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] @@ -31653,7 +30526,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; } @@ -31663,7 +30536,6 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] @@ -31675,13 +30547,12 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } _vld3q_f32(a as *const i8, 4) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] @@ -31693,7 +30564,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; } @@ -31703,7 +30574,6 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] @@ -31715,13 +30585,12 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } _vld3_s8(a as *const i8, 1) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] @@ -31733,7 +30602,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; } @@ -31743,7 +30612,6 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] @@ -31755,13 +30623,12 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } _vld3q_s8(a as *const i8, 1) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] @@ -31773,7 +30640,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; } @@ -31795,7 +30662,6 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { ); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] @@ -31807,13 +30673,12 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } _vld3_s16(a as *const i8, 2) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] @@ -31825,7 +30690,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; } @@ -31835,7 +30700,6 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] @@ -31847,13 +30711,12 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } _vld3q_s16(a as *const i8, 2) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] @@ -31865,7 +30728,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; } @@ -31875,7 +30738,6 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] @@ -31887,13 +30749,12 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } _vld3_s32(a as *const i8, 4) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] @@ -31905,7 +30766,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; } @@ -31915,7 +30776,6 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] @@ -31927,13 +30787,12 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } _vld3q_s32(a as *const i8, 4) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] @@ -31945,7 +30804,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } @@ -31955,7 +30814,6 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] @@ -31969,7 +30827,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" @@ -31984,7 +30842,6 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> } _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] @@ -31998,7 +30855,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" @@ -32021,7 +30878,6 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] @@ -32035,7 +30891,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" @@ -32050,7 +30906,6 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - } _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] @@ -32064,7 +30919,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" @@ -32087,7 +30942,6 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] @@ -32101,7 +30955,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] fn _vld3_lane_f32( ptr: *const i8, @@ -32114,7 +30968,6 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> } _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] @@ -32128,7 +30981,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] fn _vld3_lane_f32( ptr: *const i8, @@ -32149,7 +31002,6 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] @@ -32163,7 +31015,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" @@ -32178,7 +31030,6 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 } _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] @@ -32192,7 +31043,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" @@ -32215,7 +31066,6 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] @@ -32229,7 +31079,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" @@ -32244,7 +31094,6 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i } _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] @@ -32258,7 +31107,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" @@ -32281,7 +31130,6 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] @@ -32295,7 +31143,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" @@ -32310,7 +31158,6 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> } _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] @@ -32324,7 +31171,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" @@ -32347,7 +31194,6 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] @@ -32361,7 +31207,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" @@ -32376,7 +31222,6 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i } _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] @@ -32390,7 +31235,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" @@ -32413,7 +31258,6 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] @@ -32427,7 +31271,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" @@ -32442,7 +31286,6 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> } _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] @@ -32456,7 +31299,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" @@ -32479,7 +31322,6 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] @@ -32493,7 +31335,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] fn _vld3_lane_s8( ptr: *const i8, @@ -32506,7 +31348,6 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 } _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] @@ -32520,7 +31361,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] fn _vld3_lane_s8( ptr: *const i8, @@ -32541,7 +31382,6 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] @@ -32555,7 +31395,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] fn _vld3_lane_s16( ptr: *const i8, @@ -32568,7 +31408,6 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i } _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] @@ -32582,7 +31421,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] fn _vld3_lane_s16( ptr: *const i8, @@ -32603,7 +31442,6 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] @@ -32617,7 +31455,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] fn _vld3q_lane_s16( ptr: *const i8, @@ -32630,7 +31468,6 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> } _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] @@ -32644,7 +31481,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] fn _vld3q_lane_s16( ptr: *const i8, @@ -32665,7 +31502,6 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] @@ -32679,7 +31515,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] fn _vld3_lane_s32( ptr: *const i8, @@ -32692,7 +31528,6 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i } _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] @@ -32706,7 +31541,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] fn _vld3_lane_s32( ptr: *const i8, @@ -32727,7 +31562,6 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] @@ -32741,7 +31575,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] fn _vld3q_lane_s32( ptr: *const i8, @@ -32754,7 +31588,6 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> } _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Load multiple 3-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] @@ -32768,7 +31601,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] fn _vld3q_lane_s32( ptr: *const i8, @@ -32789,7 +31622,6 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] #[doc = "## Safety"] @@ -32816,7 +31648,6 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin static_assert_uimm_bits!(LANE, 3); transmute(vld3_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] #[doc = "## Safety"] @@ -32851,7 +31682,6 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] #[doc = "## Safety"] @@ -32878,7 +31708,6 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld3_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] #[doc = "## Safety"] @@ -32913,7 +31742,6 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] #[doc = "## Safety"] @@ -32940,7 +31768,6 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> static_assert_uimm_bits!(LANE, 3); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] #[doc = "## Safety"] @@ -32975,7 +31802,6 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] #[doc = "## Safety"] @@ -33002,7 +31828,6 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld3_lane_s32::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] #[doc = "## Safety"] @@ -33037,7 +31862,6 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] #[doc = "## Safety"] @@ -33064,7 +31888,6 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld3q_lane_s32::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] #[doc = "## Safety"] @@ -33099,7 +31922,6 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] #[doc = "## Safety"] @@ -33126,7 +31948,6 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol static_assert_uimm_bits!(LANE, 3); transmute(vld3_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] #[doc = "## Safety"] @@ -33161,7 +31982,6 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] #[doc = "## Safety"] @@ -33188,7 +32008,6 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld3_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] #[doc = "## Safety"] @@ -33223,7 +32042,6 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] #[doc = "## Safety"] @@ -33250,7 +32068,6 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> static_assert_uimm_bits!(LANE, 3); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] #[doc = "## Safety"] @@ -33285,7 +32102,6 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] #[doc = "## Safety"] @@ -33309,7 +32125,6 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { transmute(vld3_s64(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] #[doc = "## Safety"] @@ -33320,7 +32135,7 @@ pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v1i64.p0" @@ -33329,7 +32144,6 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { } _vld3_s64(a as _) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] #[doc = "## Safety"] @@ -33340,13 +32154,12 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } _vld3_s64(a as *const i8, 8) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] #[doc = "## Safety"] @@ -33370,7 +32183,6 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_s64(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] #[doc = "## Safety"] @@ -33395,7 +32207,6 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_s8(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] #[doc = "## Safety"] @@ -33424,7 +32235,6 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] @@ -33449,7 +32259,6 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_s8(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] @@ -33490,7 +32299,6 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { ); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] @@ -33515,7 +32323,6 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_s16(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] @@ -33544,7 +32351,6 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] @@ -33569,7 +32375,6 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_s16(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] @@ -33598,7 +32403,6 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] @@ -33623,7 +32427,6 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_s32(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] @@ -33652,7 +32455,6 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] @@ -33677,7 +32479,6 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_s32(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] @@ -33706,7 +32507,6 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] @@ -33731,7 +32531,6 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_s8(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] @@ -33760,7 +32559,6 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] @@ -33785,7 +32583,6 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_s8(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] @@ -33826,7 +32623,6 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { ); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] @@ -33851,7 +32647,6 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_s16(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] @@ -33880,7 +32675,6 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] @@ -33905,7 +32699,6 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_s16(transmute(a))) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] @@ -33934,7 +32727,6 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] @@ -33948,7 +32740,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] fn _vld3q_lane_f32( ptr: *const i8, @@ -33961,7 +32753,6 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - } _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] @@ -33975,7 +32766,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] fn _vld3q_lane_f32( ptr: *const i8, @@ -33996,7 +32787,6 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] @@ -34008,13 +32798,12 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } _vld4_dup_f32(a as *const i8, 4) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] @@ -34026,7 +32815,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } @@ -34037,7 +32826,6 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] @@ -34049,13 +32837,12 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } _vld4q_dup_f32(a as *const i8, 4) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] @@ -34067,7 +32854,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } @@ -34078,7 +32865,6 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] @@ -34090,13 +32876,12 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } _vld4_dup_s8(a as *const i8, 1) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] @@ -34108,7 +32893,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } @@ -34119,7 +32904,6 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] @@ -34131,13 +32915,12 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } _vld4q_dup_s8(a as *const i8, 1) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] @@ -34149,7 +32932,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } @@ -34176,7 +32959,6 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { ); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] @@ -34188,13 +32970,12 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } _vld4_dup_s16(a as *const i8, 2) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] @@ -34206,7 +32987,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } @@ -34217,7 +32998,6 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] @@ -34229,13 +33009,12 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } _vld4q_dup_s16(a as *const i8, 2) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] @@ -34247,7 +33026,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } @@ -34258,7 +33037,6 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] @@ -34270,13 +33048,12 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } _vld4_dup_s32(a as *const i8, 4) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] @@ -34288,7 +33065,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } @@ -34299,7 +33076,6 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] #[doc = "## Safety"] @@ -34311,13 +33087,12 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } _vld4q_dup_s32(a as *const i8, 4) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] #[doc = "## Safety"] @@ -34329,7 +33104,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(vld4))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } @@ -34340,7 +33115,6 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] @@ -34352,7 +33126,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" @@ -34361,7 +33135,6 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { } _vld4_dup_f32(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] @@ -34373,7 +33146,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" @@ -34387,7 +33160,6 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] @@ -34399,7 +33171,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" @@ -34408,7 +33180,6 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { } _vld4q_dup_f32(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] @@ -34420,7 +33191,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" @@ -34434,7 +33205,6 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] @@ -34446,7 +33216,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" @@ -34455,7 +33225,6 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { } _vld4_dup_s8(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] @@ -34467,7 +33236,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" @@ -34481,7 +33250,6 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] @@ -34493,7 +33261,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" @@ -34502,7 +33270,6 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { } _vld4q_dup_s8(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] @@ -34514,7 +33281,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" @@ -34544,7 +33311,6 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { ); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] @@ -34556,7 +33322,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" @@ -34565,7 +33331,6 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { } _vld4_dup_s16(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] @@ -34577,7 +33342,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" @@ -34591,7 +33356,6 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] @@ -34603,7 +33367,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" @@ -34612,7 +33376,6 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { } _vld4q_dup_s16(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] @@ -34624,7 +33387,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" @@ -34638,7 +33401,6 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] @@ -34650,7 +33412,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" @@ -34659,7 +33421,6 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { } _vld4_dup_s32(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] @@ -34671,7 +33432,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" @@ -34685,7 +33446,6 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] #[doc = "## Safety"] @@ -34697,7 +33457,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" @@ -34706,7 +33466,6 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { } _vld4q_dup_s32(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] #[doc = "## Safety"] @@ -34718,7 +33477,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" @@ -34732,7 +33491,6 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] #[doc = "## Safety"] @@ -34743,7 +33501,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64.p0" @@ -34752,7 +33510,6 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { } _vld4_dup_s64(a as _) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] #[doc = "## Safety"] @@ -34776,7 +33533,6 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { transmute(vld4_dup_s64(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] #[doc = "## Safety"] @@ -34787,13 +33543,12 @@ pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { #[cfg_attr(test, assert_instr(nop))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } _vld4_dup_s64(a as *const i8, 8) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] #[doc = "## Safety"] @@ -34817,7 +33572,6 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { transmute(vld4_dup_s64(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] @@ -34842,7 +33596,6 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] @@ -34872,7 +33625,6 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] @@ -34897,7 +33649,6 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] @@ -34943,7 +33694,6 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { ); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] @@ -34968,7 +33718,6 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] @@ -34998,7 +33747,6 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] @@ -35023,7 +33771,6 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] @@ -35053,7 +33800,6 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] @@ -35078,7 +33824,6 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_dup_s32(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] @@ -35108,7 +33853,6 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] @@ -35133,7 +33877,6 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_dup_s32(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] @@ -35163,7 +33906,6 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] @@ -35188,7 +33930,6 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] @@ -35218,7 +33959,6 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] @@ -35243,7 +33983,6 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] @@ -35289,7 +34028,6 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { ); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] @@ -35314,7 +34052,6 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] @@ -35344,7 +34081,6 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] @@ -35369,7 +34105,6 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } - #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] @@ -35399,7 +34134,6 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] @@ -35411,7 +34145,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2f32.p0" @@ -35420,7 +34154,6 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { } _vld4_f32(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] @@ -35432,7 +34165,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2f32.p0" @@ -35446,7 +34179,6 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] @@ -35458,7 +34190,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4f32.p0" @@ -35467,7 +34199,6 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { } _vld4q_f32(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] @@ -35479,7 +34210,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4f32.p0" @@ -35493,7 +34224,6 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] @@ -35505,7 +34235,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v8i8.p0" @@ -35514,7 +34244,6 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { } _vld4_s8(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] @@ -35526,7 +34255,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v8i8.p0" @@ -35540,7 +34269,6 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] @@ -35552,7 +34280,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v16i8.p0" @@ -35561,7 +34289,6 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { } _vld4q_s8(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] @@ -35573,7 +34300,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v16i8.p0" @@ -35603,7 +34330,6 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { ); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] @@ -35615,7 +34341,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4i16.p0" @@ -35624,7 +34350,6 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { } _vld4_s16(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] @@ -35636,7 +34361,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4i16.p0" @@ -35650,7 +34375,6 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] @@ -35662,7 +34386,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v8i16.p0" @@ -35671,7 +34395,6 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { } _vld4q_s16(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] @@ -35683,7 +34406,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v8i16.p0" @@ -35697,7 +34420,6 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] @@ -35709,7 +34431,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2i32.p0" @@ -35718,7 +34440,6 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { } _vld4_s32(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] @@ -35730,7 +34451,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2i32.p0" @@ -35744,7 +34465,6 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] @@ -35756,7 +34476,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4i32.p0" @@ -35765,7 +34485,6 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { } _vld4q_s32(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] @@ -35777,7 +34496,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v4i32.p0" @@ -35791,7 +34510,6 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] @@ -35803,13 +34521,12 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } _vld4_f32(a as *const i8, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] @@ -35821,7 +34538,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } @@ -35832,7 +34549,6 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] @@ -35844,13 +34560,12 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } _vld4q_f32(a as *const i8, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] @@ -35862,7 +34577,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; } @@ -35873,7 +34588,6 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] @@ -35885,13 +34599,12 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } _vld4_s8(a as *const i8, 1) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] @@ -35903,7 +34616,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; } @@ -35914,7 +34627,6 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] @@ -35926,13 +34638,12 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } _vld4q_s8(a as *const i8, 1) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] @@ -35944,7 +34655,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; } @@ -35971,7 +34682,6 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { ); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] @@ -35983,13 +34693,12 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } _vld4_s16(a as *const i8, 2) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] @@ -36001,7 +34710,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; } @@ -36012,7 +34721,6 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] @@ -36024,13 +34732,12 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } _vld4q_s16(a as *const i8, 2) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] @@ -36042,7 +34749,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; } @@ -36053,7 +34760,6 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] @@ -36065,13 +34771,12 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } _vld4_s32(a as *const i8, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] @@ -36083,7 +34788,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; } @@ -36094,7 +34799,6 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] @@ -36106,13 +34810,12 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } _vld4q_s32(a as *const i8, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] @@ -36124,7 +34827,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; } @@ -36135,7 +34838,6 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] @@ -36149,7 +34851,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" @@ -36165,7 +34867,6 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> } _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] @@ -36179,7 +34880,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" @@ -36205,7 +34906,6 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] @@ -36219,7 +34919,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" @@ -36235,7 +34935,6 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - } _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] @@ -36249,7 +34948,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" @@ -36275,7 +34974,6 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] @@ -36289,7 +34987,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" @@ -36305,7 +35003,6 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 } _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] @@ -36319,7 +35016,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" @@ -36345,7 +35042,6 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] @@ -36359,7 +35055,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" @@ -36375,7 +35071,6 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i } _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] @@ -36389,7 +35084,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" @@ -36415,7 +35110,6 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] @@ -36429,7 +35123,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" @@ -36445,7 +35139,6 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> } _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] @@ -36459,7 +35152,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" @@ -36485,7 +35178,6 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] @@ -36499,7 +35191,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" @@ -36515,7 +35207,6 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i } _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] @@ -36529,7 +35220,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" @@ -36555,7 +35246,6 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] @@ -36569,7 +35259,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" @@ -36585,7 +35275,6 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> } _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] @@ -36599,7 +35288,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" @@ -36625,7 +35314,6 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] @@ -36639,7 +35327,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] fn _vld4_lane_f32( ptr: *const i8, @@ -36653,7 +35341,6 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> } _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] @@ -36667,7 +35354,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] fn _vld4_lane_f32( ptr: *const i8, @@ -36691,7 +35378,6 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] @@ -36705,7 +35391,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] fn _vld4q_lane_f32( ptr: *const i8, @@ -36719,7 +35405,6 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - } _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] @@ -36733,7 +35418,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] fn _vld4q_lane_f32( ptr: *const i8, @@ -36757,7 +35442,6 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] @@ -36771,7 +35455,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] fn _vld4_lane_s8( ptr: *const i8, @@ -36785,7 +35469,6 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 } _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] @@ -36799,7 +35482,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] fn _vld4_lane_s8( ptr: *const i8, @@ -36823,7 +35506,6 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] @@ -36837,7 +35519,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] fn _vld4_lane_s16( ptr: *const i8, @@ -36851,7 +35533,6 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i } _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] @@ -36865,7 +35546,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] fn _vld4_lane_s16( ptr: *const i8, @@ -36889,7 +35570,6 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] @@ -36903,7 +35583,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] fn _vld4q_lane_s16( ptr: *const i8, @@ -36917,7 +35597,6 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> } _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] @@ -36931,7 +35610,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] fn _vld4q_lane_s16( ptr: *const i8, @@ -36955,7 +35634,6 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] @@ -36969,7 +35647,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] fn _vld4_lane_s32( ptr: *const i8, @@ -36983,7 +35661,6 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i } _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] @@ -36997,7 +35674,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] fn _vld4_lane_s32( ptr: *const i8, @@ -37021,7 +35698,6 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] @@ -37035,7 +35711,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] fn _vld4q_lane_s32( ptr: *const i8, @@ -37049,7 +35725,6 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> } _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] @@ -37063,7 +35738,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] fn _vld4q_lane_s32( ptr: *const i8, @@ -37087,7 +35762,6 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] #[doc = "## Safety"] @@ -37114,7 +35788,6 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin static_assert_uimm_bits!(LANE, 3); transmute(vld4_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] #[doc = "## Safety"] @@ -37151,7 +35824,6 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] #[doc = "## Safety"] @@ -37178,7 +35850,6 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld4_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] #[doc = "## Safety"] @@ -37215,7 +35886,6 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] #[doc = "## Safety"] @@ -37242,7 +35912,6 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> static_assert_uimm_bits!(LANE, 3); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] #[doc = "## Safety"] @@ -37279,7 +35948,6 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] #[doc = "## Safety"] @@ -37306,7 +35974,6 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> static_assert_uimm_bits!(LANE, 1); transmute(vld4_lane_s32::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] #[doc = "## Safety"] @@ -37343,7 +36010,6 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] #[doc = "## Safety"] @@ -37370,7 +36036,6 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld4q_lane_s32::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] #[doc = "## Safety"] @@ -37407,7 +36072,6 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] #[doc = "## Safety"] @@ -37434,7 +36098,6 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol static_assert_uimm_bits!(LANE, 3); transmute(vld4_lane_s8::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] #[doc = "## Safety"] @@ -37471,7 +36134,6 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] #[doc = "## Safety"] @@ -37498,7 +36160,6 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> static_assert_uimm_bits!(LANE, 2); transmute(vld4_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] #[doc = "## Safety"] @@ -37535,7 +36196,6 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] #[doc = "## Safety"] @@ -37562,7 +36222,6 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> static_assert_uimm_bits!(LANE, 3); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] #[doc = "## Safety"] @@ -37599,7 +36258,6 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] #[doc = "## Safety"] @@ -37623,7 +36281,6 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { transmute(vld4_s64(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] #[doc = "## Safety"] @@ -37634,7 +36291,7 @@ pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v1i64.p0" @@ -37643,7 +36300,6 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { } _vld4_s64(a as _) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] #[doc = "## Safety"] @@ -37654,13 +36310,12 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } _vld4_s64(a as *const i8, 8) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] #[doc = "## Safety"] @@ -37684,7 +36339,6 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { transmute(vld4_s64(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] @@ -37709,7 +36363,6 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_s8(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] @@ -37739,7 +36392,6 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] @@ -37764,7 +36416,6 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_s8(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] @@ -37810,7 +36461,6 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { ); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] @@ -37835,7 +36485,6 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_s16(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] @@ -37865,7 +36514,6 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] @@ -37890,7 +36538,6 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_s16(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] @@ -37920,7 +36567,6 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] @@ -37945,7 +36591,6 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_s32(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] @@ -37975,7 +36620,6 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] @@ -38000,7 +36644,6 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_s32(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] @@ -38030,7 +36673,6 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] @@ -38055,7 +36697,6 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_s8(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] @@ -38085,7 +36726,6 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] @@ -38110,7 +36750,6 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_s8(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] @@ -38156,7 +36795,6 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { ); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] @@ -38181,7 +36819,6 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_s16(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] @@ -38211,7 +36848,6 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] @@ -38236,7 +36872,6 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_s16(transmute(a))) } - #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] @@ -38266,7 +36901,6 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] #[doc = "## Safety"] @@ -38289,7 +36923,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38299,7 +36933,6 @@ pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vmax_f32(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] #[doc = "## Safety"] @@ -38322,7 +36955,7 @@ pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38335,7 +36968,6 @@ pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vmax_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] #[doc = "## Safety"] @@ -38358,7 +36990,7 @@ pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38368,7 +37000,6 @@ pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vmaxq_f32(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] #[doc = "## Safety"] @@ -38391,7 +37022,7 @@ pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38404,7 +37035,6 @@ pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vmaxq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] #[doc = "## Safety"] @@ -38427,7 +37057,7 @@ pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38437,7 +37067,6 @@ pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vmax_s8(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] #[doc = "## Safety"] @@ -38460,7 +37089,7 @@ pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38473,7 +37102,6 @@ pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vmax_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] #[doc = "## Safety"] @@ -38496,7 +37124,7 @@ pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38506,7 +37134,6 @@ pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vmaxq_s8(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] #[doc = "## Safety"] @@ -38529,7 +37156,7 @@ pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38546,7 +37173,6 @@ pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] #[doc = "## Safety"] @@ -38569,7 +37195,7 @@ pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38579,7 +37205,6 @@ pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vmax_s16(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] #[doc = "## Safety"] @@ -38602,7 +37227,7 @@ pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38615,7 +37240,6 @@ pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vmax_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] #[doc = "## Safety"] @@ -38638,7 +37262,7 @@ pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38648,7 +37272,6 @@ pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vmaxq_s16(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] #[doc = "## Safety"] @@ -38671,7 +37294,7 @@ pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38684,7 +37307,6 @@ pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vmaxq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] #[doc = "## Safety"] @@ -38707,7 +37329,7 @@ pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38717,7 +37339,6 @@ pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vmax_s32(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] #[doc = "## Safety"] @@ -38740,7 +37361,7 @@ pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38753,7 +37374,6 @@ pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vmax_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] #[doc = "## Safety"] @@ -38776,7 +37396,7 @@ pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38786,7 +37406,6 @@ pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vmaxq_s32(a, b) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] #[doc = "## Safety"] @@ -38809,7 +37428,7 @@ pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38822,7 +37441,6 @@ pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vmaxq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] #[doc = "## Safety"] @@ -38845,7 +37463,7 @@ pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38855,7 +37473,6 @@ pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] #[doc = "## Safety"] @@ -38878,7 +37495,7 @@ pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38891,7 +37508,6 @@ pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] #[doc = "## Safety"] @@ -38914,7 +37530,7 @@ pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38924,7 +37540,6 @@ pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] #[doc = "## Safety"] @@ -38947,7 +37562,7 @@ pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38964,7 +37579,6 @@ pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] #[doc = "## Safety"] @@ -38987,7 +37601,7 @@ pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -38997,7 +37611,6 @@ pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] #[doc = "## Safety"] @@ -39020,7 +37633,7 @@ pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39033,7 +37646,6 @@ pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] #[doc = "## Safety"] @@ -39056,7 +37668,7 @@ pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39066,7 +37678,6 @@ pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] #[doc = "## Safety"] @@ -39089,7 +37700,7 @@ pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39102,7 +37713,6 @@ pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] #[doc = "## Safety"] @@ -39125,7 +37735,7 @@ pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39135,7 +37745,6 @@ pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] #[doc = "## Safety"] @@ -39158,7 +37767,7 @@ pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39171,7 +37780,6 @@ pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] #[doc = "## Safety"] @@ -39194,7 +37802,7 @@ pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39204,7 +37812,6 @@ pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] #[doc = "## Safety"] @@ -39227,7 +37834,7 @@ pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39240,7 +37847,6 @@ pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] #[doc = "## Safety"] @@ -39263,7 +37869,7 @@ pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39273,7 +37879,6 @@ pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vmaxnm_f32(a, b) } - #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] #[doc = "## Safety"] @@ -39296,7 +37901,7 @@ pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39309,7 +37914,6 @@ pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vmaxnm_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] #[doc = "## Safety"] @@ -39332,7 +37936,7 @@ pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39342,7 +37946,6 @@ pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vmaxnmq_f32(a, b) } - #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] #[doc = "## Safety"] @@ -39365,7 +37968,7 @@ pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39378,7 +37981,6 @@ pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vmaxnmq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] #[doc = "## Safety"] @@ -39401,7 +38003,7 @@ pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39411,7 +38013,6 @@ pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vmin_f32(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] #[doc = "## Safety"] @@ -39434,7 +38035,7 @@ pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39447,7 +38048,6 @@ pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vmin_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] #[doc = "## Safety"] @@ -39470,7 +38070,7 @@ pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39480,7 +38080,6 @@ pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vminq_f32(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] #[doc = "## Safety"] @@ -39503,7 +38102,7 @@ pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39516,7 +38115,6 @@ pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vminq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] #[doc = "## Safety"] @@ -39539,7 +38137,7 @@ pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39549,7 +38147,6 @@ pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vmin_s8(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] #[doc = "## Safety"] @@ -39572,7 +38169,7 @@ pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39585,7 +38182,6 @@ pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vmin_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] #[doc = "## Safety"] @@ -39608,7 +38204,7 @@ pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39618,7 +38214,6 @@ pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vminq_s8(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] #[doc = "## Safety"] @@ -39641,7 +38236,7 @@ pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39658,7 +38253,6 @@ pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] #[doc = "## Safety"] @@ -39681,7 +38275,7 @@ pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39691,7 +38285,6 @@ pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vmin_s16(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] #[doc = "## Safety"] @@ -39714,7 +38307,7 @@ pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39727,7 +38320,6 @@ pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vmin_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] #[doc = "## Safety"] @@ -39750,7 +38342,7 @@ pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39760,7 +38352,6 @@ pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vminq_s16(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] #[doc = "## Safety"] @@ -39783,7 +38374,7 @@ pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39796,7 +38387,6 @@ pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vminq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] #[doc = "## Safety"] @@ -39819,7 +38409,7 @@ pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39829,7 +38419,6 @@ pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vmin_s32(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] #[doc = "## Safety"] @@ -39852,7 +38441,7 @@ pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39865,7 +38454,6 @@ pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vmin_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] #[doc = "## Safety"] @@ -39888,7 +38476,7 @@ pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39898,7 +38486,6 @@ pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vminq_s32(a, b) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] #[doc = "## Safety"] @@ -39921,7 +38508,7 @@ pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39934,7 +38521,6 @@ pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vminq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] #[doc = "## Safety"] @@ -39957,7 +38543,7 @@ pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -39967,7 +38553,6 @@ pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] #[doc = "## Safety"] @@ -39990,7 +38575,7 @@ pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40003,7 +38588,6 @@ pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] #[doc = "## Safety"] @@ -40026,7 +38610,7 @@ pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40036,7 +38620,6 @@ pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] #[doc = "## Safety"] @@ -40059,7 +38642,7 @@ pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40076,7 +38659,6 @@ pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] #[doc = "## Safety"] @@ -40099,7 +38681,7 @@ pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40109,7 +38691,6 @@ pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] #[doc = "## Safety"] @@ -40132,7 +38713,7 @@ pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40145,7 +38726,6 @@ pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] #[doc = "## Safety"] @@ -40168,7 +38748,7 @@ pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40178,7 +38758,6 @@ pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] #[doc = "## Safety"] @@ -40201,7 +38780,7 @@ pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40214,7 +38793,6 @@ pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] #[doc = "## Safety"] @@ -40237,7 +38815,7 @@ pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40247,7 +38825,6 @@ pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] #[doc = "## Safety"] @@ -40270,7 +38847,7 @@ pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40283,7 +38860,6 @@ pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] #[doc = "## Safety"] @@ -40306,7 +38882,7 @@ pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40316,7 +38892,6 @@ pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] #[doc = "## Safety"] @@ -40339,7 +38914,7 @@ pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40352,7 +38927,6 @@ pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] #[doc = "## Safety"] @@ -40375,7 +38949,7 @@ pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40385,7 +38959,6 @@ pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vminnm_f32(a, b) } - #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] #[doc = "## Safety"] @@ -40408,7 +38981,7 @@ pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40421,7 +38994,6 @@ pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vminnm_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] #[doc = "## Safety"] @@ -40444,7 +39016,7 @@ pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40454,7 +39026,6 @@ pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vminnmq_f32(a, b) } - #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] #[doc = "## Safety"] @@ -40477,7 +39048,7 @@ pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -40490,7 +39061,6 @@ pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vminnmq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] #[doc = "## Safety"] @@ -40515,7 +39085,6 @@ pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] #[doc = "## Safety"] @@ -40544,7 +39113,6 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 let ret_val: float32x2_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] #[doc = "## Safety"] @@ -40569,7 +39137,6 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] #[doc = "## Safety"] @@ -40598,7 +39165,6 @@ pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float let ret_val: float32x4_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] #[doc = "## Safety"] @@ -40629,7 +39195,6 @@ pub unsafe fn vmla_lane_f32( static_assert_uimm_bits!(LANE, 1); vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] #[doc = "## Safety"] @@ -40664,7 +39229,6 @@ pub unsafe fn vmla_lane_f32( let ret_val: float32x2_t = vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] #[doc = "## Safety"] @@ -40695,7 +39259,6 @@ pub unsafe fn vmla_laneq_f32( static_assert_uimm_bits!(LANE, 2); vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] #[doc = "## Safety"] @@ -40730,7 +39293,6 @@ pub unsafe fn vmla_laneq_f32( let ret_val: float32x2_t = vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] #[doc = "## Safety"] @@ -40765,7 +39327,6 @@ pub unsafe fn vmlaq_lane_f32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] #[doc = "## Safety"] @@ -40804,7 +39365,6 @@ pub unsafe fn vmlaq_lane_f32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] #[doc = "## Safety"] @@ -40839,7 +39399,6 @@ pub unsafe fn vmlaq_laneq_f32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] #[doc = "## Safety"] @@ -40878,7 +39437,6 @@ pub unsafe fn vmlaq_laneq_f32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] #[doc = "## Safety"] @@ -40913,7 +39471,6 @@ pub unsafe fn vmla_lane_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] #[doc = "## Safety"] @@ -40952,7 +39509,6 @@ pub unsafe fn vmla_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] #[doc = "## Safety"] @@ -40987,7 +39543,6 @@ pub unsafe fn vmla_lane_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] #[doc = "## Safety"] @@ -41026,7 +39581,6 @@ pub unsafe fn vmla_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] #[doc = "## Safety"] @@ -41061,7 +39615,6 @@ pub unsafe fn vmla_laneq_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] #[doc = "## Safety"] @@ -41100,7 +39653,6 @@ pub unsafe fn vmla_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] #[doc = "## Safety"] @@ -41135,7 +39687,6 @@ pub unsafe fn vmla_laneq_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] #[doc = "## Safety"] @@ -41174,7 +39725,6 @@ pub unsafe fn vmla_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] #[doc = "## Safety"] @@ -41222,7 +39772,6 @@ pub unsafe fn vmlaq_lane_s16( ), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] #[doc = "## Safety"] @@ -41274,7 +39823,6 @@ pub unsafe fn vmlaq_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] #[doc = "## Safety"] @@ -41322,7 +39870,6 @@ pub unsafe fn vmlaq_lane_u16( ), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] #[doc = "## Safety"] @@ -41374,7 +39921,6 @@ pub unsafe fn vmlaq_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] #[doc = "## Safety"] @@ -41422,7 +39968,6 @@ pub unsafe fn vmlaq_laneq_s16( ), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] #[doc = "## Safety"] @@ -41474,7 +40019,6 @@ pub unsafe fn vmlaq_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] #[doc = "## Safety"] @@ -41522,7 +40066,6 @@ pub unsafe fn vmlaq_laneq_u16( ), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] #[doc = "## Safety"] @@ -41574,7 +40117,6 @@ pub unsafe fn vmlaq_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] #[doc = "## Safety"] @@ -41605,7 +40147,6 @@ pub unsafe fn vmla_lane_s32( static_assert_uimm_bits!(LANE, 1); vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] #[doc = "## Safety"] @@ -41640,7 +40181,6 @@ pub unsafe fn vmla_lane_s32( let ret_val: int32x2_t = vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] #[doc = "## Safety"] @@ -41671,7 +40211,6 @@ pub unsafe fn vmla_lane_u32( static_assert_uimm_bits!(LANE, 1); vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] #[doc = "## Safety"] @@ -41706,7 +40245,6 @@ pub unsafe fn vmla_lane_u32( let ret_val: uint32x2_t = vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] #[doc = "## Safety"] @@ -41737,7 +40275,6 @@ pub unsafe fn vmla_laneq_s32( static_assert_uimm_bits!(LANE, 2); vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] #[doc = "## Safety"] @@ -41772,7 +40309,6 @@ pub unsafe fn vmla_laneq_s32( let ret_val: int32x2_t = vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] #[doc = "## Safety"] @@ -41803,7 +40339,6 @@ pub unsafe fn vmla_laneq_u32( static_assert_uimm_bits!(LANE, 2); vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] #[doc = "## Safety"] @@ -41838,7 +40373,6 @@ pub unsafe fn vmla_laneq_u32( let ret_val: uint32x2_t = vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] #[doc = "## Safety"] @@ -41873,7 +40407,6 @@ pub unsafe fn vmlaq_lane_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] #[doc = "## Safety"] @@ -41912,7 +40445,6 @@ pub unsafe fn vmlaq_lane_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] #[doc = "## Safety"] @@ -41947,7 +40479,6 @@ pub unsafe fn vmlaq_lane_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] #[doc = "## Safety"] @@ -41986,7 +40517,6 @@ pub unsafe fn vmlaq_lane_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] #[doc = "## Safety"] @@ -42021,7 +40551,6 @@ pub unsafe fn vmlaq_laneq_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] #[doc = "## Safety"] @@ -42060,7 +40589,6 @@ pub unsafe fn vmlaq_laneq_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] #[doc = "## Safety"] @@ -42095,7 +40623,6 @@ pub unsafe fn vmlaq_laneq_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] #[doc = "## Safety"] @@ -42134,7 +40661,6 @@ pub unsafe fn vmlaq_laneq_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] #[doc = "## Safety"] @@ -42159,7 +40685,6 @@ pub unsafe fn vmlaq_laneq_u32( pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmla_f32(a, b, vdup_n_f32(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] #[doc = "## Safety"] @@ -42187,7 +40712,6 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t let ret_val: float32x2_t = vmla_f32(a, b, vdup_n_f32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] #[doc = "## Safety"] @@ -42212,7 +40736,6 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlaq_f32(a, b, vdupq_n_f32(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] #[doc = "## Safety"] @@ -42240,7 +40763,6 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t let ret_val: float32x4_t = vmlaq_f32(a, b, vdupq_n_f32(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] #[doc = "## Safety"] @@ -42265,7 +40787,6 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmla_s16(a, b, vdup_n_s16(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] #[doc = "## Safety"] @@ -42293,7 +40814,6 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { let ret_val: int16x4_t = vmla_s16(a, b, vdup_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] #[doc = "## Safety"] @@ -42318,7 +40838,6 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlaq_s16(a, b, vdupq_n_s16(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] #[doc = "## Safety"] @@ -42346,7 +40865,6 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { let ret_val: int16x8_t = vmlaq_s16(a, b, vdupq_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] #[doc = "## Safety"] @@ -42371,7 +40889,6 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmla_u16(a, b, vdup_n_u16(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] #[doc = "## Safety"] @@ -42399,7 +40916,6 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { let ret_val: uint16x4_t = vmla_u16(a, b, vdup_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] #[doc = "## Safety"] @@ -42424,7 +40940,6 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlaq_u16(a, b, vdupq_n_u16(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] #[doc = "## Safety"] @@ -42452,7 +40967,6 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { let ret_val: uint16x8_t = vmlaq_u16(a, b, vdupq_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] #[doc = "## Safety"] @@ -42477,7 +40991,6 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmla_s32(a, b, vdup_n_s32(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] #[doc = "## Safety"] @@ -42505,7 +41018,6 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { let ret_val: int32x2_t = vmla_s32(a, b, vdup_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] #[doc = "## Safety"] @@ -42530,7 +41042,6 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlaq_s32(a, b, vdupq_n_s32(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] #[doc = "## Safety"] @@ -42558,7 +41069,6 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { let ret_val: int32x4_t = vmlaq_s32(a, b, vdupq_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] #[doc = "## Safety"] @@ -42583,7 +41093,6 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmla_u32(a, b, vdup_n_u32(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] #[doc = "## Safety"] @@ -42611,7 +41120,6 @@ pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { let ret_val: uint32x2_t = vmla_u32(a, b, vdup_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] #[doc = "## Safety"] @@ -42636,7 +41144,6 @@ pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlaq_u32(a, b, vdupq_n_u32(c)) } - #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] #[doc = "## Safety"] @@ -42664,7 +41171,6 @@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { let ret_val: uint32x4_t = vmlaq_u32(a, b, vdupq_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] #[doc = "## Safety"] @@ -42689,7 +41195,6 @@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] #[doc = "## Safety"] @@ -42718,7 +41223,6 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] #[doc = "## Safety"] @@ -42743,7 +41247,6 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] #[doc = "## Safety"] @@ -42776,7 +41279,6 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] #[doc = "## Safety"] @@ -42801,7 +41303,6 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] #[doc = "## Safety"] @@ -42830,7 +41331,6 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] #[doc = "## Safety"] @@ -42855,7 +41355,6 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] #[doc = "## Safety"] @@ -42884,7 +41383,6 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] #[doc = "## Safety"] @@ -42909,7 +41407,6 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] #[doc = "## Safety"] @@ -42938,7 +41435,6 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] #[doc = "## Safety"] @@ -42963,7 +41459,6 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] #[doc = "## Safety"] @@ -42992,7 +41487,6 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] #[doc = "## Safety"] @@ -43017,7 +41511,6 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] #[doc = "## Safety"] @@ -43046,7 +41539,6 @@ pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] #[doc = "## Safety"] @@ -43071,7 +41563,6 @@ pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] #[doc = "## Safety"] @@ -43104,7 +41595,6 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] #[doc = "## Safety"] @@ -43129,7 +41619,6 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] #[doc = "## Safety"] @@ -43158,7 +41647,6 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ let ret_val: uint16x4_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] #[doc = "## Safety"] @@ -43183,7 +41671,6 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] #[doc = "## Safety"] @@ -43212,7 +41699,6 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 let ret_val: uint16x8_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] #[doc = "## Safety"] @@ -43237,7 +41723,6 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] #[doc = "## Safety"] @@ -43266,7 +41751,6 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ let ret_val: uint32x2_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] #[doc = "## Safety"] @@ -43291,7 +41775,6 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_add(a, simd_mul(b, c)) } - #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] #[doc = "## Safety"] @@ -43320,7 +41803,6 @@ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 let ret_val: uint32x4_t = simd_add(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] #[doc = "## Safety"] @@ -43355,7 +41837,6 @@ pub unsafe fn vmlal_lane_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] #[doc = "## Safety"] @@ -43394,7 +41875,6 @@ pub unsafe fn vmlal_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] #[doc = "## Safety"] @@ -43429,7 +41909,6 @@ pub unsafe fn vmlal_laneq_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] #[doc = "## Safety"] @@ -43468,7 +41947,6 @@ pub unsafe fn vmlal_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] #[doc = "## Safety"] @@ -43499,7 +41977,6 @@ pub unsafe fn vmlal_lane_s32( static_assert_uimm_bits!(LANE, 1); vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] #[doc = "## Safety"] @@ -43534,7 +42011,6 @@ pub unsafe fn vmlal_lane_s32( let ret_val: int64x2_t = vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] #[doc = "## Safety"] @@ -43565,7 +42041,6 @@ pub unsafe fn vmlal_laneq_s32( static_assert_uimm_bits!(LANE, 2); vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] #[doc = "## Safety"] @@ -43600,7 +42075,6 @@ pub unsafe fn vmlal_laneq_s32( let ret_val: int64x2_t = vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] #[doc = "## Safety"] @@ -43635,7 +42109,6 @@ pub unsafe fn vmlal_lane_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] #[doc = "## Safety"] @@ -43674,7 +42147,6 @@ pub unsafe fn vmlal_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] #[doc = "## Safety"] @@ -43709,7 +42181,6 @@ pub unsafe fn vmlal_laneq_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] #[doc = "## Safety"] @@ -43748,7 +42219,6 @@ pub unsafe fn vmlal_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] #[doc = "## Safety"] @@ -43779,7 +42249,6 @@ pub unsafe fn vmlal_lane_u32( static_assert_uimm_bits!(LANE, 1); vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] #[doc = "## Safety"] @@ -43814,7 +42283,6 @@ pub unsafe fn vmlal_lane_u32( let ret_val: uint64x2_t = vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] #[doc = "## Safety"] @@ -43845,7 +42313,6 @@ pub unsafe fn vmlal_laneq_u32( static_assert_uimm_bits!(LANE, 2); vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] #[doc = "## Safety"] @@ -43880,7 +42347,6 @@ pub unsafe fn vmlal_laneq_u32( let ret_val: uint64x2_t = vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] #[doc = "## Safety"] @@ -43905,7 +42371,6 @@ pub unsafe fn vmlal_laneq_u32( pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlal_s16(a, b, vdup_n_s16(c)) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] #[doc = "## Safety"] @@ -43933,7 +42398,6 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { let ret_val: int32x4_t = vmlal_s16(a, b, vdup_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] #[doc = "## Safety"] @@ -43958,7 +42422,6 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlal_s32(a, b, vdup_n_s32(c)) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] #[doc = "## Safety"] @@ -43986,7 +42449,6 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { let ret_val: int64x2_t = vmlal_s32(a, b, vdup_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] #[doc = "## Safety"] @@ -44011,7 +42473,6 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlal_u16(a, b, vdup_n_u16(c)) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] #[doc = "## Safety"] @@ -44039,7 +42500,6 @@ pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { let ret_val: uint32x4_t = vmlal_u16(a, b, vdup_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] #[doc = "## Safety"] @@ -44064,7 +42524,6 @@ pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlal_u32(a, b, vdup_n_u32(c)) } - #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] #[doc = "## Safety"] @@ -44092,7 +42551,6 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { let ret_val: uint64x2_t = vmlal_u32(a, b, vdup_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] #[doc = "## Safety"] @@ -44117,7 +42575,6 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_add(a, vmull_s8(b, c)) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] #[doc = "## Safety"] @@ -44146,7 +42603,6 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_add(a, vmull_s8(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] #[doc = "## Safety"] @@ -44171,7 +42627,6 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_add(a, vmull_s16(b, c)) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] #[doc = "## Safety"] @@ -44200,7 +42655,6 @@ pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_add(a, vmull_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] #[doc = "## Safety"] @@ -44225,7 +42679,6 @@ pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_add(a, vmull_s32(b, c)) } - #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] #[doc = "## Safety"] @@ -44254,7 +42707,6 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_add(a, vmull_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] #[doc = "## Safety"] @@ -44279,7 +42731,6 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_add(a, vmull_u8(b, c)) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] #[doc = "## Safety"] @@ -44308,7 +42759,6 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t let ret_val: uint16x8_t = simd_add(a, vmull_u8(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] #[doc = "## Safety"] @@ -44333,7 +42783,6 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_add(a, vmull_u16(b, c)) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] #[doc = "## Safety"] @@ -44362,7 +42811,6 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 let ret_val: uint32x4_t = simd_add(a, vmull_u16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] #[doc = "## Safety"] @@ -44387,7 +42835,6 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_add(a, vmull_u32(b, c)) } - #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] #[doc = "## Safety"] @@ -44416,7 +42863,6 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 let ret_val: uint64x2_t = simd_add(a, vmull_u32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] #[doc = "## Safety"] @@ -44441,7 +42887,6 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] #[doc = "## Safety"] @@ -44470,7 +42915,6 @@ pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 let ret_val: float32x2_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] #[doc = "## Safety"] @@ -44495,7 +42939,6 @@ pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] #[doc = "## Safety"] @@ -44524,7 +42967,6 @@ pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float let ret_val: float32x4_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] #[doc = "## Safety"] @@ -44555,7 +42997,6 @@ pub unsafe fn vmls_lane_f32( static_assert_uimm_bits!(LANE, 1); vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] #[doc = "## Safety"] @@ -44590,7 +43031,6 @@ pub unsafe fn vmls_lane_f32( let ret_val: float32x2_t = vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] #[doc = "## Safety"] @@ -44621,7 +43061,6 @@ pub unsafe fn vmls_laneq_f32( static_assert_uimm_bits!(LANE, 2); vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] #[doc = "## Safety"] @@ -44656,7 +43095,6 @@ pub unsafe fn vmls_laneq_f32( let ret_val: float32x2_t = vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] #[doc = "## Safety"] @@ -44691,7 +43129,6 @@ pub unsafe fn vmlsq_lane_f32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] #[doc = "## Safety"] @@ -44730,7 +43167,6 @@ pub unsafe fn vmlsq_lane_f32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] #[doc = "## Safety"] @@ -44765,7 +43201,6 @@ pub unsafe fn vmlsq_laneq_f32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] #[doc = "## Safety"] @@ -44804,7 +43239,6 @@ pub unsafe fn vmlsq_laneq_f32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] #[doc = "## Safety"] @@ -44839,7 +43273,6 @@ pub unsafe fn vmls_lane_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] #[doc = "## Safety"] @@ -44878,7 +43311,6 @@ pub unsafe fn vmls_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] #[doc = "## Safety"] @@ -44913,7 +43345,6 @@ pub unsafe fn vmls_lane_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] #[doc = "## Safety"] @@ -44952,7 +43383,6 @@ pub unsafe fn vmls_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] #[doc = "## Safety"] @@ -44987,7 +43417,6 @@ pub unsafe fn vmls_laneq_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] #[doc = "## Safety"] @@ -45026,7 +43455,6 @@ pub unsafe fn vmls_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] #[doc = "## Safety"] @@ -45061,7 +43489,6 @@ pub unsafe fn vmls_laneq_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] #[doc = "## Safety"] @@ -45100,7 +43527,6 @@ pub unsafe fn vmls_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] #[doc = "## Safety"] @@ -45148,7 +43574,6 @@ pub unsafe fn vmlsq_lane_s16( ), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] #[doc = "## Safety"] @@ -45200,7 +43625,6 @@ pub unsafe fn vmlsq_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] #[doc = "## Safety"] @@ -45248,7 +43672,6 @@ pub unsafe fn vmlsq_lane_u16( ), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] #[doc = "## Safety"] @@ -45300,7 +43723,6 @@ pub unsafe fn vmlsq_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] #[doc = "## Safety"] @@ -45348,7 +43770,6 @@ pub unsafe fn vmlsq_laneq_s16( ), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] #[doc = "## Safety"] @@ -45400,7 +43821,6 @@ pub unsafe fn vmlsq_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] #[doc = "## Safety"] @@ -45448,7 +43868,6 @@ pub unsafe fn vmlsq_laneq_u16( ), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] #[doc = "## Safety"] @@ -45500,7 +43919,6 @@ pub unsafe fn vmlsq_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] #[doc = "## Safety"] @@ -45531,7 +43949,6 @@ pub unsafe fn vmls_lane_s32( static_assert_uimm_bits!(LANE, 1); vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] #[doc = "## Safety"] @@ -45566,7 +43983,6 @@ pub unsafe fn vmls_lane_s32( let ret_val: int32x2_t = vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] #[doc = "## Safety"] @@ -45597,7 +44013,6 @@ pub unsafe fn vmls_lane_u32( static_assert_uimm_bits!(LANE, 1); vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] #[doc = "## Safety"] @@ -45632,7 +44047,6 @@ pub unsafe fn vmls_lane_u32( let ret_val: uint32x2_t = vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] #[doc = "## Safety"] @@ -45663,7 +44077,6 @@ pub unsafe fn vmls_laneq_s32( static_assert_uimm_bits!(LANE, 2); vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] #[doc = "## Safety"] @@ -45698,7 +44111,6 @@ pub unsafe fn vmls_laneq_s32( let ret_val: int32x2_t = vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] #[doc = "## Safety"] @@ -45729,7 +44141,6 @@ pub unsafe fn vmls_laneq_u32( static_assert_uimm_bits!(LANE, 2); vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] #[doc = "## Safety"] @@ -45764,7 +44175,6 @@ pub unsafe fn vmls_laneq_u32( let ret_val: uint32x2_t = vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] #[doc = "## Safety"] @@ -45799,7 +44209,6 @@ pub unsafe fn vmlsq_lane_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] #[doc = "## Safety"] @@ -45838,7 +44247,6 @@ pub unsafe fn vmlsq_lane_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] #[doc = "## Safety"] @@ -45873,7 +44281,6 @@ pub unsafe fn vmlsq_lane_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] #[doc = "## Safety"] @@ -45912,7 +44319,6 @@ pub unsafe fn vmlsq_lane_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] #[doc = "## Safety"] @@ -45947,7 +44353,6 @@ pub unsafe fn vmlsq_laneq_s32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] #[doc = "## Safety"] @@ -45986,7 +44391,6 @@ pub unsafe fn vmlsq_laneq_s32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] #[doc = "## Safety"] @@ -46021,7 +44425,6 @@ pub unsafe fn vmlsq_laneq_u32( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] #[doc = "## Safety"] @@ -46060,7 +44463,6 @@ pub unsafe fn vmlsq_laneq_u32( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] #[doc = "## Safety"] @@ -46085,7 +44487,6 @@ pub unsafe fn vmlsq_laneq_u32( pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmls_f32(a, b, vdup_n_f32(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] #[doc = "## Safety"] @@ -46113,7 +44514,6 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t let ret_val: float32x2_t = vmls_f32(a, b, vdup_n_f32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] #[doc = "## Safety"] @@ -46138,7 +44538,6 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlsq_f32(a, b, vdupq_n_f32(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] #[doc = "## Safety"] @@ -46166,7 +44565,6 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t let ret_val: float32x4_t = vmlsq_f32(a, b, vdupq_n_f32(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] #[doc = "## Safety"] @@ -46191,7 +44589,6 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmls_s16(a, b, vdup_n_s16(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] #[doc = "## Safety"] @@ -46219,7 +44616,6 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { let ret_val: int16x4_t = vmls_s16(a, b, vdup_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] #[doc = "## Safety"] @@ -46244,7 +44640,6 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlsq_s16(a, b, vdupq_n_s16(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] #[doc = "## Safety"] @@ -46272,7 +44667,6 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { let ret_val: int16x8_t = vmlsq_s16(a, b, vdupq_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] #[doc = "## Safety"] @@ -46297,7 +44691,6 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmls_u16(a, b, vdup_n_u16(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] #[doc = "## Safety"] @@ -46325,7 +44718,6 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { let ret_val: uint16x4_t = vmls_u16(a, b, vdup_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] #[doc = "## Safety"] @@ -46350,7 +44742,6 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlsq_u16(a, b, vdupq_n_u16(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] #[doc = "## Safety"] @@ -46378,7 +44769,6 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { let ret_val: uint16x8_t = vmlsq_u16(a, b, vdupq_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] #[doc = "## Safety"] @@ -46403,7 +44793,6 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmls_s32(a, b, vdup_n_s32(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] #[doc = "## Safety"] @@ -46431,7 +44820,6 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { let ret_val: int32x2_t = vmls_s32(a, b, vdup_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] #[doc = "## Safety"] @@ -46456,7 +44844,6 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlsq_s32(a, b, vdupq_n_s32(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] #[doc = "## Safety"] @@ -46484,7 +44871,6 @@ pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { let ret_val: int32x4_t = vmlsq_s32(a, b, vdupq_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] #[doc = "## Safety"] @@ -46509,7 +44895,6 @@ pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmls_u32(a, b, vdup_n_u32(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] #[doc = "## Safety"] @@ -46537,7 +44922,6 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { let ret_val: uint32x2_t = vmls_u32(a, b, vdup_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] #[doc = "## Safety"] @@ -46562,7 +44946,6 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlsq_u32(a, b, vdupq_n_u32(c)) } - #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] #[doc = "## Safety"] @@ -46590,7 +44973,6 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { let ret_val: uint32x4_t = vmlsq_u32(a, b, vdupq_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] #[doc = "## Safety"] @@ -46615,7 +44997,6 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] #[doc = "## Safety"] @@ -46644,7 +45025,6 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] #[doc = "## Safety"] @@ -46669,7 +45049,6 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] #[doc = "## Safety"] @@ -46702,7 +45081,6 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] #[doc = "## Safety"] @@ -46727,7 +45105,6 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] #[doc = "## Safety"] @@ -46756,7 +45133,6 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] #[doc = "## Safety"] @@ -46781,7 +45157,6 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] #[doc = "## Safety"] @@ -46810,7 +45185,6 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] #[doc = "## Safety"] @@ -46835,7 +45209,6 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] #[doc = "## Safety"] @@ -46864,7 +45237,6 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] #[doc = "## Safety"] @@ -46889,7 +45261,6 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] #[doc = "## Safety"] @@ -46918,7 +45289,6 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] #[doc = "## Safety"] @@ -46943,7 +45313,6 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] #[doc = "## Safety"] @@ -46972,7 +45341,6 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] #[doc = "## Safety"] @@ -46997,7 +45365,6 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] #[doc = "## Safety"] @@ -47030,7 +45397,6 @@ pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] #[doc = "## Safety"] @@ -47055,7 +45421,6 @@ pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] #[doc = "## Safety"] @@ -47084,7 +45449,6 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ let ret_val: uint16x4_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] #[doc = "## Safety"] @@ -47109,7 +45473,6 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] #[doc = "## Safety"] @@ -47138,7 +45501,6 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 let ret_val: uint16x8_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] #[doc = "## Safety"] @@ -47163,7 +45525,6 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] #[doc = "## Safety"] @@ -47192,7 +45553,6 @@ pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ let ret_val: uint32x2_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] #[doc = "## Safety"] @@ -47217,7 +45577,6 @@ pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_sub(a, simd_mul(b, c)) } - #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] #[doc = "## Safety"] @@ -47246,7 +45605,6 @@ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 let ret_val: uint32x4_t = simd_sub(a, simd_mul(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] #[doc = "## Safety"] @@ -47281,7 +45639,6 @@ pub unsafe fn vmlsl_lane_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] #[doc = "## Safety"] @@ -47320,7 +45677,6 @@ pub unsafe fn vmlsl_lane_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] #[doc = "## Safety"] @@ -47355,7 +45711,6 @@ pub unsafe fn vmlsl_laneq_s16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] #[doc = "## Safety"] @@ -47394,7 +45749,6 @@ pub unsafe fn vmlsl_laneq_s16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] #[doc = "## Safety"] @@ -47425,7 +45779,6 @@ pub unsafe fn vmlsl_lane_s32( static_assert_uimm_bits!(LANE, 1); vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] #[doc = "## Safety"] @@ -47460,7 +45813,6 @@ pub unsafe fn vmlsl_lane_s32( let ret_val: int64x2_t = vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] #[doc = "## Safety"] @@ -47491,7 +45843,6 @@ pub unsafe fn vmlsl_laneq_s32( static_assert_uimm_bits!(LANE, 2); vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] #[doc = "## Safety"] @@ -47526,7 +45877,6 @@ pub unsafe fn vmlsl_laneq_s32( let ret_val: int64x2_t = vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] #[doc = "## Safety"] @@ -47561,7 +45911,6 @@ pub unsafe fn vmlsl_lane_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] #[doc = "## Safety"] @@ -47600,7 +45949,6 @@ pub unsafe fn vmlsl_lane_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] #[doc = "## Safety"] @@ -47635,7 +45983,6 @@ pub unsafe fn vmlsl_laneq_u16( simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] #[doc = "## Safety"] @@ -47674,7 +46021,6 @@ pub unsafe fn vmlsl_laneq_u16( ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] #[doc = "## Safety"] @@ -47705,7 +46051,6 @@ pub unsafe fn vmlsl_lane_u32( static_assert_uimm_bits!(LANE, 1); vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] #[doc = "## Safety"] @@ -47740,7 +46085,6 @@ pub unsafe fn vmlsl_lane_u32( let ret_val: uint64x2_t = vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] #[doc = "## Safety"] @@ -47771,7 +46115,6 @@ pub unsafe fn vmlsl_laneq_u32( static_assert_uimm_bits!(LANE, 2); vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] #[doc = "## Safety"] @@ -47806,7 +46149,6 @@ pub unsafe fn vmlsl_laneq_u32( let ret_val: uint64x2_t = vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] #[doc = "## Safety"] @@ -47831,7 +46173,6 @@ pub unsafe fn vmlsl_laneq_u32( pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlsl_s16(a, b, vdup_n_s16(c)) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] #[doc = "## Safety"] @@ -47859,7 +46200,6 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { let ret_val: int32x4_t = vmlsl_s16(a, b, vdup_n_s16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] #[doc = "## Safety"] @@ -47884,7 +46224,6 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlsl_s32(a, b, vdup_n_s32(c)) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] #[doc = "## Safety"] @@ -47912,7 +46251,6 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { let ret_val: int64x2_t = vmlsl_s32(a, b, vdup_n_s32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] #[doc = "## Safety"] @@ -47937,7 +46275,6 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlsl_u16(a, b, vdup_n_u16(c)) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] #[doc = "## Safety"] @@ -47965,7 +46302,6 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { let ret_val: uint32x4_t = vmlsl_u16(a, b, vdup_n_u16(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] #[doc = "## Safety"] @@ -47990,7 +46326,6 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlsl_u32(a, b, vdup_n_u32(c)) } - #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] #[doc = "## Safety"] @@ -48018,7 +46353,6 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { let ret_val: uint64x2_t = vmlsl_u32(a, b, vdup_n_u32(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] #[doc = "## Safety"] @@ -48043,7 +46377,6 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_sub(a, vmull_s8(b, c)) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] #[doc = "## Safety"] @@ -48072,7 +46405,6 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_sub(a, vmull_s8(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] #[doc = "## Safety"] @@ -48097,7 +46429,6 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_sub(a, vmull_s16(b, c)) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] #[doc = "## Safety"] @@ -48126,7 +46457,6 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_sub(a, vmull_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] #[doc = "## Safety"] @@ -48151,7 +46481,6 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_sub(a, vmull_s32(b, c)) } - #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] #[doc = "## Safety"] @@ -48180,7 +46509,6 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_sub(a, vmull_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] #[doc = "## Safety"] @@ -48205,7 +46533,6 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_sub(a, vmull_u8(b, c)) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] #[doc = "## Safety"] @@ -48234,7 +46561,6 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t let ret_val: uint16x8_t = simd_sub(a, vmull_u8(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] #[doc = "## Safety"] @@ -48259,7 +46585,6 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_sub(a, vmull_u16(b, c)) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] #[doc = "## Safety"] @@ -48288,7 +46613,6 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 let ret_val: uint32x4_t = simd_sub(a, vmull_u16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] #[doc = "## Safety"] @@ -48313,7 +46637,6 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_sub(a, vmull_u32(b, c)) } - #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] #[doc = "## Safety"] @@ -48342,7 +46665,6 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 let ret_val: uint64x2_t = simd_sub(a, vmull_u32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] #[doc = "## Safety"] @@ -48365,7 +46687,7 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" @@ -48375,7 +46697,6 @@ pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t } _vmmlaq_s32(a, b, c) } - #[doc = "8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] #[doc = "## Safety"] @@ -48398,7 +46719,7 @@ pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" @@ -48412,7 +46733,6 @@ pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t let ret_val: int32x4_t = _vmmlaq_s32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] #[doc = "## Safety"] @@ -48435,7 +46755,7 @@ pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" @@ -48445,7 +46765,6 @@ pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x } _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } - #[doc = "8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] #[doc = "## Safety"] @@ -48468,7 +46787,7 @@ pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" @@ -48483,7 +46802,6 @@ pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] #[doc = "## Safety"] @@ -48508,7 +46826,6 @@ pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] #[doc = "## Safety"] @@ -48536,7 +46853,6 @@ pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] #[doc = "## Safety"] @@ -48561,7 +46877,6 @@ pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] #[doc = "## Safety"] @@ -48589,7 +46904,6 @@ pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] #[doc = "## Safety"] @@ -48616,7 +46930,6 @@ pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] #[doc = "## Safety"] @@ -48646,7 +46959,6 @@ pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> let ret_val: float32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] #[doc = "## Safety"] @@ -48673,7 +46985,6 @@ pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] #[doc = "## Safety"] @@ -48703,7 +47014,6 @@ pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> let ret_val: float32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] #[doc = "## Safety"] @@ -48733,7 +47043,6 @@ pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] #[doc = "## Safety"] @@ -48766,7 +47075,6 @@ pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] #[doc = "## Safety"] @@ -48796,7 +47104,6 @@ pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] #[doc = "## Safety"] @@ -48829,7 +47136,6 @@ pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) - ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] #[doc = "## Safety"] @@ -48859,7 +47165,6 @@ pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int1 simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] #[doc = "## Safety"] @@ -48892,7 +47197,6 @@ pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int1 ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] #[doc = "## Safety"] @@ -48935,7 +47239,6 @@ pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int ), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] #[doc = "## Safety"] @@ -48981,7 +47284,6 @@ pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] #[doc = "## Safety"] @@ -49008,7 +47310,6 @@ pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int3 static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] #[doc = "## Safety"] @@ -49038,7 +47339,6 @@ pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int3 let ret_val: int32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] #[doc = "## Safety"] @@ -49068,7 +47368,6 @@ pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] #[doc = "## Safety"] @@ -49101,7 +47400,6 @@ pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] #[doc = "## Safety"] @@ -49131,7 +47429,6 @@ pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> ui simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] #[doc = "## Safety"] @@ -49164,7 +47461,6 @@ pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> ui ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] #[doc = "## Safety"] @@ -49207,7 +47503,6 @@ pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> u ), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] #[doc = "## Safety"] @@ -49253,7 +47548,6 @@ pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> u ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] #[doc = "## Safety"] @@ -49280,7 +47574,6 @@ pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> ui static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] #[doc = "## Safety"] @@ -49310,7 +47603,6 @@ pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> ui let ret_val: uint32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] #[doc = "## Safety"] @@ -49340,7 +47632,6 @@ pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> u simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] #[doc = "## Safety"] @@ -49373,7 +47664,6 @@ pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> u ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] #[doc = "## Safety"] @@ -49403,7 +47693,6 @@ pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] #[doc = "## Safety"] @@ -49436,7 +47725,6 @@ pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] #[doc = "## Safety"] @@ -49479,7 +47767,6 @@ pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> in ), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] #[doc = "## Safety"] @@ -49525,7 +47812,6 @@ pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> in ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] #[doc = "## Safety"] @@ -49552,7 +47838,6 @@ pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] #[doc = "## Safety"] @@ -49582,7 +47867,6 @@ pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int let ret_val: int32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] #[doc = "## Safety"] @@ -49612,7 +47896,6 @@ pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> in simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] #[doc = "## Safety"] @@ -49645,7 +47928,6 @@ pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> in ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] #[doc = "## Safety"] @@ -49675,7 +47957,6 @@ pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> u simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] #[doc = "## Safety"] @@ -49708,7 +47989,6 @@ pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> u ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] #[doc = "## Safety"] @@ -49751,7 +48031,6 @@ pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> ), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] #[doc = "## Safety"] @@ -49797,7 +48076,6 @@ pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] #[doc = "## Safety"] @@ -49824,7 +48102,6 @@ pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> u static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] #[doc = "## Safety"] @@ -49854,7 +48131,6 @@ pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> u let ret_val: uint32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] #[doc = "## Safety"] @@ -49884,7 +48160,6 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] #[doc = "## Safety"] @@ -49917,7 +48192,6 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] #[doc = "## Safety"] @@ -49942,7 +48216,6 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { simd_mul(a, vdup_n_f32(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] #[doc = "## Safety"] @@ -49969,7 +48242,6 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { let ret_val: float32x2_t = simd_mul(a, vdup_n_f32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] #[doc = "## Safety"] @@ -49994,7 +48266,6 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { simd_mul(a, vdupq_n_f32(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] #[doc = "## Safety"] @@ -50021,7 +48292,6 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { let ret_val: float32x4_t = simd_mul(a, vdupq_n_f32(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] #[doc = "## Safety"] @@ -50046,7 +48316,6 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { simd_mul(a, vdup_n_s16(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] #[doc = "## Safety"] @@ -50073,7 +48342,6 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { let ret_val: int16x4_t = simd_mul(a, vdup_n_s16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] #[doc = "## Safety"] @@ -50098,7 +48366,6 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { simd_mul(a, vdupq_n_s16(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] #[doc = "## Safety"] @@ -50125,7 +48392,6 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { let ret_val: int16x8_t = simd_mul(a, vdupq_n_s16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] #[doc = "## Safety"] @@ -50150,7 +48416,6 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { simd_mul(a, vdup_n_s32(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] #[doc = "## Safety"] @@ -50177,7 +48442,6 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { let ret_val: int32x2_t = simd_mul(a, vdup_n_s32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] #[doc = "## Safety"] @@ -50202,7 +48466,6 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { simd_mul(a, vdupq_n_s32(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] #[doc = "## Safety"] @@ -50229,7 +48492,6 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { let ret_val: int32x4_t = simd_mul(a, vdupq_n_s32(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] #[doc = "## Safety"] @@ -50254,7 +48516,6 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { simd_mul(a, vdup_n_u16(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] #[doc = "## Safety"] @@ -50281,7 +48542,6 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { let ret_val: uint16x4_t = simd_mul(a, vdup_n_u16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] #[doc = "## Safety"] @@ -50306,7 +48566,6 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { simd_mul(a, vdupq_n_u16(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] #[doc = "## Safety"] @@ -50333,7 +48592,6 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { let ret_val: uint16x8_t = simd_mul(a, vdupq_n_u16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] #[doc = "## Safety"] @@ -50358,7 +48616,6 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { simd_mul(a, vdup_n_u32(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] #[doc = "## Safety"] @@ -50385,7 +48642,6 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { let ret_val: uint32x2_t = simd_mul(a, vdup_n_u32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] #[doc = "## Safety"] @@ -50410,7 +48666,6 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { simd_mul(a, vdupq_n_u32(b)) } - #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] #[doc = "## Safety"] @@ -50437,7 +48692,6 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { let ret_val: uint32x4_t = simd_mul(a, vdupq_n_u32(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Polynomial multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] #[doc = "## Safety"] @@ -50460,7 +48714,7 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -50470,7 +48724,6 @@ pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } _vmul_p8(a, b) } - #[doc = "Polynomial multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] #[doc = "## Safety"] @@ -50493,7 +48746,7 @@ pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -50506,7 +48759,6 @@ pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = _vmul_p8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Polynomial multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] #[doc = "## Safety"] @@ -50529,7 +48781,7 @@ pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -50539,7 +48791,6 @@ pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } _vmulq_p8(a, b) } - #[doc = "Polynomial multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] #[doc = "## Safety"] @@ -50562,7 +48813,7 @@ pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -50579,7 +48830,6 @@ pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] #[doc = "## Safety"] @@ -50604,7 +48854,6 @@ pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] #[doc = "## Safety"] @@ -50632,7 +48881,6 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] #[doc = "## Safety"] @@ -50657,7 +48905,6 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] #[doc = "## Safety"] @@ -50685,7 +48932,6 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] #[doc = "## Safety"] @@ -50710,7 +48956,6 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] #[doc = "## Safety"] @@ -50738,7 +48983,6 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] #[doc = "## Safety"] @@ -50763,7 +49007,6 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] #[doc = "## Safety"] @@ -50791,7 +49034,6 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] #[doc = "## Safety"] @@ -50816,7 +49058,6 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] #[doc = "## Safety"] @@ -50844,7 +49085,6 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] #[doc = "## Safety"] @@ -50869,7 +49109,6 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] #[doc = "## Safety"] @@ -50897,7 +49136,6 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] #[doc = "## Safety"] @@ -50922,7 +49160,6 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] #[doc = "## Safety"] @@ -50950,7 +49187,6 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] #[doc = "## Safety"] @@ -50975,7 +49211,6 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] #[doc = "## Safety"] @@ -51003,7 +49238,6 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] #[doc = "## Safety"] @@ -51028,7 +49262,6 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] #[doc = "## Safety"] @@ -51056,7 +49289,6 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] #[doc = "## Safety"] @@ -51081,7 +49313,6 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] #[doc = "## Safety"] @@ -51113,7 +49344,6 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] #[doc = "## Safety"] @@ -51138,7 +49368,6 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] #[doc = "## Safety"] @@ -51166,7 +49395,6 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_mul(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] #[doc = "## Safety"] @@ -51191,7 +49419,6 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_mul(a, b) } - #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] #[doc = "## Safety"] @@ -51223,7 +49450,6 @@ pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] #[doc = "## Safety"] @@ -51253,7 +49479,6 @@ pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] #[doc = "## Safety"] @@ -51286,7 +49511,6 @@ pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] #[doc = "## Safety"] @@ -51316,7 +49540,6 @@ pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> in simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] #[doc = "## Safety"] @@ -51349,7 +49572,6 @@ pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> in ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] #[doc = "## Safety"] @@ -51376,7 +49598,6 @@ pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int static_assert_uimm_bits!(LANE, 1); vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] #[doc = "## Safety"] @@ -51406,7 +49627,6 @@ pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int let ret_val: int64x2_t = vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] #[doc = "## Safety"] @@ -51433,7 +49653,6 @@ pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> in static_assert_uimm_bits!(LANE, 2); vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] #[doc = "## Safety"] @@ -51463,7 +49682,6 @@ pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> in let ret_val: int64x2_t = vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] #[doc = "## Safety"] @@ -51493,7 +49711,6 @@ pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> u simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] #[doc = "## Safety"] @@ -51526,7 +49743,6 @@ pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> u ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] #[doc = "## Safety"] @@ -51556,7 +49772,6 @@ pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] #[doc = "## Safety"] @@ -51589,7 +49804,6 @@ pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] #[doc = "## Safety"] @@ -51616,7 +49830,6 @@ pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> u static_assert_uimm_bits!(LANE, 1); vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] #[doc = "## Safety"] @@ -51646,7 +49859,6 @@ pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> u let ret_val: uint64x2_t = vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] #[doc = "## Safety"] @@ -51673,7 +49885,6 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> static_assert_uimm_bits!(LANE, 2); vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } - #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] #[doc = "## Safety"] @@ -51703,7 +49914,6 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> let ret_val: uint64x2_t = vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] #[doc = "## Safety"] @@ -51728,7 +49938,6 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vmull_s16(a, vdup_n_s16(b)) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] #[doc = "## Safety"] @@ -51755,7 +49964,6 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { let ret_val: int32x4_t = vmull_s16(a, vdup_n_s16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] #[doc = "## Safety"] @@ -51780,7 +49988,6 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vmull_s32(a, vdup_n_s32(b)) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] #[doc = "## Safety"] @@ -51807,7 +50014,6 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { let ret_val: int64x2_t = vmull_s32(a, vdup_n_s32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] #[doc = "## Safety"] @@ -51832,7 +50038,6 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { vmull_u16(a, vdup_n_u16(b)) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] #[doc = "## Safety"] @@ -51859,7 +50064,6 @@ pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { let ret_val: uint32x4_t = vmull_u16(a, vdup_n_u16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] #[doc = "## Safety"] @@ -51884,7 +50088,6 @@ pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { vmull_u32(a, vdup_n_u32(b)) } - #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] #[doc = "## Safety"] @@ -51911,7 +50114,6 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { let ret_val: uint64x2_t = vmull_u32(a, vdup_n_u32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] #[doc = "## Safety"] @@ -51934,7 +50136,7 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.pmull.v8i16" @@ -51944,7 +50146,6 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { } _vmull_p8(a, b) } - #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] #[doc = "## Safety"] @@ -51967,7 +50168,7 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.pmull.v8i16" @@ -51980,7 +50181,6 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { let ret_val: poly16x8_t = _vmull_p8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] #[doc = "## Safety"] @@ -52003,7 +50203,7 @@ pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v4i16" @@ -52013,7 +50213,6 @@ pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } _vmull_s16(a, b) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] #[doc = "## Safety"] @@ -52036,7 +50235,7 @@ pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v4i16" @@ -52049,7 +50248,6 @@ pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = _vmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] #[doc = "## Safety"] @@ -52072,7 +50270,7 @@ pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v2i32" @@ -52082,7 +50280,6 @@ pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { } _vmull_s32(a, b) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] #[doc = "## Safety"] @@ -52105,7 +50302,7 @@ pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v2i32" @@ -52118,7 +50315,6 @@ pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = _vmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] #[doc = "## Safety"] @@ -52141,7 +50337,7 @@ pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v8i8" @@ -52151,7 +50347,6 @@ pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { } _vmull_s8(a, b) } - #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] #[doc = "## Safety"] @@ -52174,7 +50369,7 @@ pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smull.v8i8" @@ -52187,7 +50382,6 @@ pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = _vmull_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] #[doc = "## Safety"] @@ -52210,7 +50404,7 @@ pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v8i8" @@ -52220,7 +50414,6 @@ pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { } _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] #[doc = "## Safety"] @@ -52243,7 +50436,7 @@ pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v8i8" @@ -52256,7 +50449,6 @@ pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] #[doc = "## Safety"] @@ -52279,7 +50471,7 @@ pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v4i16" @@ -52289,7 +50481,6 @@ pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { } _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] #[doc = "## Safety"] @@ -52312,7 +50503,7 @@ pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v4i16" @@ -52325,7 +50516,6 @@ pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] #[doc = "## Safety"] @@ -52348,7 +50538,7 @@ pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v2i32" @@ -52358,7 +50548,6 @@ pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { } _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] #[doc = "## Safety"] @@ -52381,7 +50570,7 @@ pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umull.v2i32" @@ -52394,7 +50583,6 @@ pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] #[doc = "## Safety"] @@ -52419,7 +50607,6 @@ pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] #[doc = "## Safety"] @@ -52446,7 +50633,6 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] #[doc = "## Safety"] @@ -52471,7 +50657,6 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] #[doc = "## Safety"] @@ -52498,7 +50683,6 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] #[doc = "## Safety"] @@ -52523,7 +50707,6 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] #[doc = "## Safety"] @@ -52550,7 +50733,6 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] #[doc = "## Safety"] @@ -52575,7 +50757,6 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] #[doc = "## Safety"] @@ -52606,7 +50787,6 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] #[doc = "## Safety"] @@ -52631,7 +50811,6 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] #[doc = "## Safety"] @@ -52658,7 +50837,6 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] #[doc = "## Safety"] @@ -52683,7 +50861,6 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] #[doc = "## Safety"] @@ -52710,7 +50887,6 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] #[doc = "## Safety"] @@ -52735,7 +50911,6 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] #[doc = "## Safety"] @@ -52762,7 +50937,6 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] #[doc = "## Safety"] @@ -52787,7 +50961,6 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { simd_neg(a) } - #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] #[doc = "## Safety"] @@ -52814,7 +50987,6 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_neg(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] #[doc = "## Safety"] @@ -52839,7 +51011,6 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] #[doc = "## Safety"] @@ -52867,7 +51038,6 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] #[doc = "## Safety"] @@ -52892,7 +51062,6 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] #[doc = "## Safety"] @@ -52924,7 +51093,6 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] #[doc = "## Safety"] @@ -52949,7 +51117,6 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] #[doc = "## Safety"] @@ -52977,7 +51144,6 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] #[doc = "## Safety"] @@ -53002,7 +51168,6 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] #[doc = "## Safety"] @@ -53030,7 +51195,6 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] #[doc = "## Safety"] @@ -53055,7 +51219,6 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] #[doc = "## Safety"] @@ -53083,7 +51246,6 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] #[doc = "## Safety"] @@ -53108,7 +51270,6 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] #[doc = "## Safety"] @@ -53136,7 +51297,6 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] #[doc = "## Safety"] @@ -53160,7 +51320,6 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] #[doc = "## Safety"] @@ -53185,7 +51344,6 @@ pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] #[doc = "## Safety"] @@ -53213,7 +51371,6 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] #[doc = "## Safety"] @@ -53238,7 +51395,6 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] #[doc = "## Safety"] @@ -53266,7 +51422,6 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] #[doc = "## Safety"] @@ -53291,7 +51446,6 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] #[doc = "## Safety"] @@ -53323,7 +51477,6 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] #[doc = "## Safety"] @@ -53348,7 +51501,6 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] #[doc = "## Safety"] @@ -53376,7 +51528,6 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] #[doc = "## Safety"] @@ -53401,7 +51552,6 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] #[doc = "## Safety"] @@ -53429,7 +51579,6 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] #[doc = "## Safety"] @@ -53454,7 +51603,6 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] #[doc = "## Safety"] @@ -53482,7 +51630,6 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] #[doc = "## Safety"] @@ -53507,7 +51654,6 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] #[doc = "## Safety"] @@ -53535,7 +51681,6 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)"] #[doc = "## Safety"] @@ -53559,7 +51704,6 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] #[doc = "## Safety"] @@ -53584,7 +51728,6 @@ pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_or(a, b) } - #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] #[doc = "## Safety"] @@ -53612,7 +51755,6 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_or(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] #[doc = "## Safety"] @@ -53646,7 +51788,6 @@ pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { }; x } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] #[doc = "## Safety"] @@ -53683,7 +51824,6 @@ pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { let ret_val: int16x4_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] #[doc = "## Safety"] @@ -53717,7 +51857,6 @@ pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { }; x } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] #[doc = "## Safety"] @@ -53754,7 +51893,6 @@ pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] #[doc = "## Safety"] @@ -53788,7 +51926,6 @@ pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { }; x } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] #[doc = "## Safety"] @@ -53825,7 +51962,6 @@ pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { let ret_val: int32x2_t = x; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] #[doc = "## Safety"] @@ -53859,7 +51995,6 @@ pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { }; x } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] #[doc = "## Safety"] @@ -53896,7 +52031,6 @@ pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] #[doc = "## Safety"] @@ -53930,7 +52064,6 @@ pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { }; x } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] #[doc = "## Safety"] @@ -53965,7 +52098,6 @@ pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { }; x } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] #[doc = "## Safety"] @@ -53999,7 +52131,6 @@ pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { }; x } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] #[doc = "## Safety"] @@ -54036,7 +52167,6 @@ pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = x; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] #[doc = "## Safety"] @@ -54070,7 +52200,6 @@ pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { }; x } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] #[doc = "## Safety"] @@ -54107,7 +52236,6 @@ pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { let ret_val: uint16x4_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] #[doc = "## Safety"] @@ -54141,7 +52269,6 @@ pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { }; x } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] #[doc = "## Safety"] @@ -54178,7 +52305,6 @@ pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] #[doc = "## Safety"] @@ -54212,7 +52338,6 @@ pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { }; x } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] #[doc = "## Safety"] @@ -54249,7 +52374,6 @@ pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { let ret_val: uint32x2_t = x; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] #[doc = "## Safety"] @@ -54283,7 +52407,6 @@ pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { }; x } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] #[doc = "## Safety"] @@ -54320,7 +52443,6 @@ pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = x; simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] #[doc = "## Safety"] @@ -54354,7 +52476,6 @@ pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { }; x } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] #[doc = "## Safety"] @@ -54389,7 +52510,6 @@ pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { }; x } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] #[doc = "## Safety"] @@ -54423,7 +52543,6 @@ pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { }; x } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] #[doc = "## Safety"] @@ -54460,7 +52579,6 @@ pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = x; simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] #[doc = "## Safety"] @@ -54483,7 +52601,7 @@ pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -54493,7 +52611,6 @@ pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vpadd_f32(a, b) } - #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] #[doc = "## Safety"] @@ -54516,7 +52633,7 @@ pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -54529,7 +52646,6 @@ pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vpadd_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] #[doc = "## Safety"] @@ -54552,7 +52668,7 @@ pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v8i8" @@ -54562,7 +52678,6 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vpadd_s8(a, b) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] #[doc = "## Safety"] @@ -54585,7 +52700,7 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v8i8" @@ -54598,7 +52713,6 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vpadd_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] #[doc = "## Safety"] @@ -54621,7 +52735,7 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v4i16" @@ -54631,7 +52745,6 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vpadd_s16(a, b) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] #[doc = "## Safety"] @@ -54654,7 +52767,7 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v4i16" @@ -54667,7 +52780,6 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vpadd_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] #[doc = "## Safety"] @@ -54690,7 +52802,7 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v2i32" @@ -54700,7 +52812,6 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vpadd_s32(a, b) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] #[doc = "## Safety"] @@ -54723,7 +52834,7 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.addp.v2i32" @@ -54736,7 +52847,6 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vpadd_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] #[doc = "## Safety"] @@ -54761,7 +52871,6 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { transmute(vpadd_s8(transmute(a), transmute(b))) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] #[doc = "## Safety"] @@ -54789,7 +52898,6 @@ pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vpadd_s8(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] #[doc = "## Safety"] @@ -54814,7 +52922,6 @@ pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { transmute(vpadd_s16(transmute(a), transmute(b))) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] #[doc = "## Safety"] @@ -54842,7 +52949,6 @@ pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(vpadd_s16(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] #[doc = "## Safety"] @@ -54867,7 +52973,6 @@ pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { transmute(vpadd_s32(transmute(a), transmute(b))) } - #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] #[doc = "## Safety"] @@ -54895,7 +53000,6 @@ pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(vpadd_s32(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] #[doc = "## Safety"] @@ -54918,7 +53022,7 @@ pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" @@ -54928,7 +53032,6 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { } _vpaddl_s8(a) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] #[doc = "## Safety"] @@ -54951,7 +53054,7 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" @@ -54963,7 +53066,6 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { let ret_val: int16x4_t = _vpaddl_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] #[doc = "## Safety"] @@ -54986,7 +53088,7 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" @@ -54996,7 +53098,6 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { } _vpaddlq_s8(a) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] #[doc = "## Safety"] @@ -55019,7 +53120,7 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" @@ -55031,7 +53132,6 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = _vpaddlq_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] #[doc = "## Safety"] @@ -55054,7 +53154,7 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" @@ -55064,7 +53164,6 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { } _vpaddl_s16(a) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] #[doc = "## Safety"] @@ -55087,7 +53186,7 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" @@ -55099,7 +53198,6 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { let ret_val: int32x2_t = _vpaddl_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] #[doc = "## Safety"] @@ -55122,7 +53220,7 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" @@ -55132,7 +53230,6 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { } _vpaddlq_s16(a) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] #[doc = "## Safety"] @@ -55155,7 +53252,7 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" @@ -55167,7 +53264,6 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = _vpaddlq_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] #[doc = "## Safety"] @@ -55190,7 +53286,7 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" @@ -55200,7 +53296,6 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { } _vpaddl_s32(a) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] #[doc = "## Safety"] @@ -55223,7 +53318,7 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" @@ -55234,7 +53329,6 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); _vpaddl_s32(a) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] #[doc = "## Safety"] @@ -55257,7 +53351,7 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" @@ -55267,7 +53361,6 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { } _vpaddlq_s32(a) } - #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] #[doc = "## Safety"] @@ -55290,7 +53383,7 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" @@ -55302,7 +53395,6 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = _vpaddlq_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] #[doc = "## Safety"] @@ -55325,7 +53417,7 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" @@ -55335,7 +53427,6 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { } _vpaddl_u8(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] #[doc = "## Safety"] @@ -55358,7 +53449,7 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" @@ -55370,7 +53461,6 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { let ret_val: uint16x4_t = _vpaddl_u8(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] #[doc = "## Safety"] @@ -55393,7 +53483,7 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" @@ -55403,7 +53493,6 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { } _vpaddlq_u8(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] #[doc = "## Safety"] @@ -55426,7 +53515,7 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" @@ -55438,7 +53527,6 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = _vpaddlq_u8(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] #[doc = "## Safety"] @@ -55461,7 +53549,7 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" @@ -55471,7 +53559,6 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { } _vpaddl_u16(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] #[doc = "## Safety"] @@ -55494,7 +53581,7 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" @@ -55506,7 +53593,6 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { let ret_val: uint32x2_t = _vpaddl_u16(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] #[doc = "## Safety"] @@ -55529,7 +53615,7 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" @@ -55539,7 +53625,6 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { } _vpaddlq_u16(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] #[doc = "## Safety"] @@ -55562,7 +53647,7 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" @@ -55574,7 +53659,6 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = _vpaddlq_u16(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] #[doc = "## Safety"] @@ -55597,7 +53681,7 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" @@ -55607,7 +53691,6 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { } _vpaddl_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] #[doc = "## Safety"] @@ -55630,7 +53713,7 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" @@ -55641,7 +53724,6 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); _vpaddl_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] #[doc = "## Safety"] @@ -55664,7 +53746,7 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" @@ -55674,7 +53756,6 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { } _vpaddlq_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] #[doc = "## Safety"] @@ -55697,7 +53778,7 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" @@ -55709,7 +53790,6 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = _vpaddlq_u32(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] #[doc = "## Safety"] @@ -55732,7 +53812,7 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxp.v2f32" @@ -55742,7 +53822,6 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vpmax_f32(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] #[doc = "## Safety"] @@ -55765,7 +53844,7 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxp.v2f32" @@ -55778,7 +53857,6 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vpmax_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] #[doc = "## Safety"] @@ -55801,7 +53879,7 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v8i8" @@ -55811,7 +53889,6 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vpmax_s8(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] #[doc = "## Safety"] @@ -55834,7 +53911,7 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v8i8" @@ -55847,7 +53924,6 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vpmax_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] #[doc = "## Safety"] @@ -55870,7 +53946,7 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v4i16" @@ -55880,7 +53956,6 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vpmax_s16(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] #[doc = "## Safety"] @@ -55903,7 +53978,7 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v4i16" @@ -55916,7 +53991,6 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vpmax_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] #[doc = "## Safety"] @@ -55939,7 +54013,7 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v2i32" @@ -55949,7 +54023,6 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vpmax_s32(a, b) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] #[doc = "## Safety"] @@ -55972,7 +54045,7 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.smaxp.v2i32" @@ -55985,7 +54058,6 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vpmax_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] #[doc = "## Safety"] @@ -56008,7 +54080,7 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v8i8" @@ -56018,7 +54090,6 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] #[doc = "## Safety"] @@ -56041,7 +54112,7 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v8i8" @@ -56054,7 +54125,6 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] #[doc = "## Safety"] @@ -56077,7 +54147,7 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v4i16" @@ -56087,7 +54157,6 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] #[doc = "## Safety"] @@ -56110,7 +54179,7 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v4i16" @@ -56123,7 +54192,6 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] #[doc = "## Safety"] @@ -56146,7 +54214,7 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v2i32" @@ -56156,7 +54224,6 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] #[doc = "## Safety"] @@ -56179,7 +54246,7 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v2i32" @@ -56192,7 +54259,6 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] #[doc = "## Safety"] @@ -56215,7 +54281,7 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminp.v2f32" @@ -56225,7 +54291,6 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vpmin_f32(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] #[doc = "## Safety"] @@ -56248,7 +54313,7 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminp.v2f32" @@ -56261,7 +54326,6 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vpmin_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] #[doc = "## Safety"] @@ -56284,7 +54348,7 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v8i8" @@ -56294,7 +54358,6 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vpmin_s8(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] #[doc = "## Safety"] @@ -56317,7 +54380,7 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v8i8" @@ -56330,7 +54393,6 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vpmin_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] #[doc = "## Safety"] @@ -56353,7 +54415,7 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v4i16" @@ -56363,7 +54425,6 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vpmin_s16(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] #[doc = "## Safety"] @@ -56386,7 +54447,7 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v4i16" @@ -56399,7 +54460,6 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vpmin_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] #[doc = "## Safety"] @@ -56422,7 +54482,7 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v2i32" @@ -56432,7 +54492,6 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vpmin_s32(a, b) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] #[doc = "## Safety"] @@ -56455,7 +54514,7 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sminp.v2i32" @@ -56468,7 +54527,6 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vpmin_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] #[doc = "## Safety"] @@ -56491,7 +54549,7 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v8i8" @@ -56501,7 +54559,6 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] #[doc = "## Safety"] @@ -56524,7 +54581,7 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v8i8" @@ -56537,7 +54594,6 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] #[doc = "## Safety"] @@ -56560,7 +54616,7 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v4i16" @@ -56570,7 +54626,6 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] #[doc = "## Safety"] @@ -56593,7 +54648,7 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v4i16" @@ -56606,7 +54661,6 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] #[doc = "## Safety"] @@ -56629,7 +54683,7 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v2i32" @@ -56639,7 +54693,6 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] #[doc = "## Safety"] @@ -56662,7 +54715,7 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v2i32" @@ -56675,7 +54728,6 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] #[doc = "## Safety"] @@ -56698,7 +54750,7 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v8i8" @@ -56708,7 +54760,6 @@ pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { } _vqabs_s8(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] #[doc = "## Safety"] @@ -56731,7 +54782,7 @@ pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v8i8" @@ -56743,7 +54794,6 @@ pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqabs_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] #[doc = "## Safety"] @@ -56766,7 +54816,7 @@ pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v16i8" @@ -56776,7 +54826,6 @@ pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { } _vqabsq_s8(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] #[doc = "## Safety"] @@ -56799,7 +54848,7 @@ pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v16i8" @@ -56815,7 +54864,6 @@ pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] #[doc = "## Safety"] @@ -56838,7 +54886,7 @@ pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v4i16" @@ -56848,7 +54896,6 @@ pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { } _vqabs_s16(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] #[doc = "## Safety"] @@ -56871,7 +54918,7 @@ pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v4i16" @@ -56883,7 +54930,6 @@ pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqabs_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] #[doc = "## Safety"] @@ -56906,7 +54952,7 @@ pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v8i16" @@ -56916,7 +54962,6 @@ pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { } _vqabsq_s16(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] #[doc = "## Safety"] @@ -56939,7 +54984,7 @@ pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v8i16" @@ -56951,7 +54996,6 @@ pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqabsq_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] #[doc = "## Safety"] @@ -56974,7 +55018,7 @@ pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v2i32" @@ -56984,7 +55028,6 @@ pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { } _vqabs_s32(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] #[doc = "## Safety"] @@ -57007,7 +55050,7 @@ pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v2i32" @@ -57019,7 +55062,6 @@ pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqabs_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] #[doc = "## Safety"] @@ -57042,7 +55084,7 @@ pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v4i32" @@ -57052,7 +55094,6 @@ pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { } _vqabsq_s32(a) } - #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] #[doc = "## Safety"] @@ -57075,7 +55116,7 @@ pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v4i32" @@ -57087,7 +55128,6 @@ pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqabsq_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] #[doc = "## Safety"] @@ -57110,7 +55150,7 @@ pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v8i8" @@ -57120,7 +55160,6 @@ pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vqadd_s8(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] #[doc = "## Safety"] @@ -57143,7 +55182,7 @@ pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v8i8" @@ -57156,7 +55195,6 @@ pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqadd_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] #[doc = "## Safety"] @@ -57179,7 +55217,7 @@ pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v16i8" @@ -57189,7 +55227,6 @@ pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vqaddq_s8(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] #[doc = "## Safety"] @@ -57212,7 +55249,7 @@ pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v16i8" @@ -57229,7 +55266,6 @@ pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] #[doc = "## Safety"] @@ -57252,7 +55288,7 @@ pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v4i16" @@ -57262,7 +55298,6 @@ pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vqadd_s16(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] #[doc = "## Safety"] @@ -57285,7 +55320,7 @@ pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v4i16" @@ -57298,7 +55333,6 @@ pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqadd_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] #[doc = "## Safety"] @@ -57321,7 +55355,7 @@ pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v8i16" @@ -57331,7 +55365,6 @@ pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vqaddq_s16(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] #[doc = "## Safety"] @@ -57354,7 +55387,7 @@ pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v8i16" @@ -57367,7 +55400,6 @@ pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqaddq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] #[doc = "## Safety"] @@ -57390,7 +55422,7 @@ pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v2i32" @@ -57400,7 +55432,6 @@ pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vqadd_s32(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] #[doc = "## Safety"] @@ -57423,7 +55454,7 @@ pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v2i32" @@ -57436,7 +55467,6 @@ pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqadd_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] #[doc = "## Safety"] @@ -57459,7 +55489,7 @@ pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v4i32" @@ -57469,7 +55499,6 @@ pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vqaddq_s32(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] #[doc = "## Safety"] @@ -57492,7 +55521,7 @@ pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v4i32" @@ -57505,7 +55534,6 @@ pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqaddq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] #[doc = "## Safety"] @@ -57527,7 +55555,7 @@ pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v1i64" @@ -57537,7 +55565,6 @@ pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } _vqadd_s64(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] #[doc = "## Safety"] @@ -57560,7 +55587,7 @@ pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v2i64" @@ -57570,7 +55597,6 @@ pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } _vqaddq_s64(a, b) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] #[doc = "## Safety"] @@ -57593,7 +55619,7 @@ pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.v2i64" @@ -57606,7 +55632,6 @@ pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vqaddq_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] #[doc = "## Safety"] @@ -57629,7 +55654,7 @@ pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v8i8" @@ -57639,7 +55664,6 @@ pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] #[doc = "## Safety"] @@ -57662,7 +55686,7 @@ pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v8i8" @@ -57675,7 +55699,6 @@ pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] #[doc = "## Safety"] @@ -57698,7 +55721,7 @@ pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v16i8" @@ -57708,7 +55731,6 @@ pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] #[doc = "## Safety"] @@ -57731,7 +55753,7 @@ pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v16i8" @@ -57748,7 +55770,6 @@ pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] #[doc = "## Safety"] @@ -57771,7 +55792,7 @@ pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v4i16" @@ -57781,7 +55802,6 @@ pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] #[doc = "## Safety"] @@ -57804,7 +55824,7 @@ pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v4i16" @@ -57817,7 +55837,6 @@ pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] #[doc = "## Safety"] @@ -57840,7 +55859,7 @@ pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v8i16" @@ -57850,7 +55869,6 @@ pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] #[doc = "## Safety"] @@ -57873,7 +55891,7 @@ pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v8i16" @@ -57886,7 +55904,6 @@ pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] #[doc = "## Safety"] @@ -57909,7 +55926,7 @@ pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v2i32" @@ -57919,7 +55936,6 @@ pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] #[doc = "## Safety"] @@ -57942,7 +55958,7 @@ pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v2i32" @@ -57955,7 +55971,6 @@ pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] #[doc = "## Safety"] @@ -57978,7 +55993,7 @@ pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v4i32" @@ -57988,7 +56003,6 @@ pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] #[doc = "## Safety"] @@ -58011,7 +56025,7 @@ pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v4i32" @@ -58024,7 +56038,6 @@ pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] #[doc = "## Safety"] @@ -58046,7 +56059,7 @@ pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v1i64" @@ -58056,7 +56069,6 @@ pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } _vqadd_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] #[doc = "## Safety"] @@ -58079,7 +56091,7 @@ pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v2i64" @@ -58089,7 +56101,6 @@ pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] #[doc = "## Safety"] @@ -58112,7 +56123,7 @@ pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.v2i64" @@ -58125,7 +56136,6 @@ pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] #[doc = "## Safety"] @@ -58156,7 +56166,6 @@ pub unsafe fn vqdmlal_lane_s16( static_assert_uimm_bits!(N, 2); vqaddq_s32(a, vqdmull_lane_s16::(b, c)) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] #[doc = "## Safety"] @@ -58191,7 +56200,6 @@ pub unsafe fn vqdmlal_lane_s16( let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_lane_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] #[doc = "## Safety"] @@ -58222,7 +56230,6 @@ pub unsafe fn vqdmlal_lane_s32( static_assert_uimm_bits!(N, 1); vqaddq_s64(a, vqdmull_lane_s32::(b, c)) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] #[doc = "## Safety"] @@ -58257,7 +56264,6 @@ pub unsafe fn vqdmlal_lane_s32( let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_lane_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] #[doc = "## Safety"] @@ -58282,7 +56288,6 @@ pub unsafe fn vqdmlal_lane_s32( pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqaddq_s32(a, vqdmull_n_s16(b, c)) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] #[doc = "## Safety"] @@ -58310,7 +56315,6 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_n_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] #[doc = "## Safety"] @@ -58335,7 +56339,6 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vqaddq_s64(a, vqdmull_n_s32(b, c)) } - #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] #[doc = "## Safety"] @@ -58363,7 +56366,6 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_n_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] #[doc = "## Safety"] @@ -58388,7 +56390,6 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqaddq_s32(a, vqdmull_s16(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] #[doc = "## Safety"] @@ -58417,7 +56418,6 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] #[doc = "## Safety"] @@ -58442,7 +56442,6 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqaddq_s64(a, vqdmull_s32(b, c)) } - #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] #[doc = "## Safety"] @@ -58471,7 +56470,6 @@ pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] #[doc = "## Safety"] @@ -58502,7 +56500,6 @@ pub unsafe fn vqdmlsl_lane_s16( static_assert_uimm_bits!(N, 2); vqsubq_s32(a, vqdmull_lane_s16::(b, c)) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] #[doc = "## Safety"] @@ -58537,7 +56534,6 @@ pub unsafe fn vqdmlsl_lane_s16( let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_lane_s16::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] #[doc = "## Safety"] @@ -58568,7 +56564,6 @@ pub unsafe fn vqdmlsl_lane_s32( static_assert_uimm_bits!(N, 1); vqsubq_s64(a, vqdmull_lane_s32::(b, c)) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] #[doc = "## Safety"] @@ -58603,7 +56598,6 @@ pub unsafe fn vqdmlsl_lane_s32( let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_lane_s32::(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] #[doc = "## Safety"] @@ -58628,7 +56622,6 @@ pub unsafe fn vqdmlsl_lane_s32( pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqsubq_s32(a, vqdmull_n_s16(b, c)) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] #[doc = "## Safety"] @@ -58656,7 +56649,6 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_n_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] #[doc = "## Safety"] @@ -58681,7 +56673,6 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vqsubq_s64(a, vqdmull_n_s32(b, c)) } - #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] #[doc = "## Safety"] @@ -58709,7 +56700,6 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_n_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] #[doc = "## Safety"] @@ -58734,7 +56724,6 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqsubq_s32(a, vqdmull_s16(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] #[doc = "## Safety"] @@ -58763,7 +56752,6 @@ pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_s16(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] #[doc = "## Safety"] @@ -58788,7 +56776,6 @@ pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqsubq_s64(a, vqdmull_s32(b, c)) } - #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] #[doc = "## Safety"] @@ -58817,7 +56804,6 @@ pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_s32(b, c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] #[doc = "## Safety"] @@ -58844,7 +56830,6 @@ pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> static_assert_uimm_bits!(LANE, 3); vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] #[doc = "## Safety"] @@ -58874,7 +56859,6 @@ pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> let ret_val: int16x4_t = vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] #[doc = "## Safety"] @@ -58901,7 +56885,6 @@ pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> static_assert_uimm_bits!(LANE, 3); vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] #[doc = "## Safety"] @@ -58931,7 +56914,6 @@ pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> let ret_val: int16x8_t = vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] #[doc = "## Safety"] @@ -58958,7 +56940,6 @@ pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> static_assert_uimm_bits!(LANE, 2); vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] #[doc = "## Safety"] @@ -58988,7 +56969,6 @@ pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> let ret_val: int32x2_t = vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] #[doc = "## Safety"] @@ -59015,7 +56995,6 @@ pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> static_assert_uimm_bits!(LANE, 2); vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) } - #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] #[doc = "## Safety"] @@ -59045,7 +57024,6 @@ pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> let ret_val: int32x4_t = vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] #[doc = "## Safety"] @@ -59071,7 +57049,6 @@ pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { let b: int16x4_t = vdup_n_s16(b); vqdmulh_s16(a, b) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] #[doc = "## Safety"] @@ -59099,7 +57076,6 @@ pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { let ret_val: int16x4_t = vqdmulh_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] #[doc = "## Safety"] @@ -59125,7 +57101,6 @@ pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { let b: int16x8_t = vdupq_n_s16(b); vqdmulhq_s16(a, b) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] #[doc = "## Safety"] @@ -59153,7 +57128,6 @@ pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { let ret_val: int16x8_t = vqdmulhq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] #[doc = "## Safety"] @@ -59179,7 +57153,6 @@ pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { let b: int32x2_t = vdup_n_s32(b); vqdmulh_s32(a, b) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] #[doc = "## Safety"] @@ -59207,7 +57180,6 @@ pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { let ret_val: int32x2_t = vqdmulh_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] #[doc = "## Safety"] @@ -59233,7 +57205,6 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { let b: int32x4_t = vdupq_n_s32(b); vqdmulhq_s32(a, b) } - #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] #[doc = "## Safety"] @@ -59261,7 +57232,6 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { let ret_val: int32x4_t = vqdmulhq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] #[doc = "## Safety"] @@ -59284,7 +57254,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59294,7 +57264,6 @@ pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vqdmulh_s16(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] #[doc = "## Safety"] @@ -59317,7 +57286,7 @@ pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59330,7 +57299,6 @@ pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqdmulh_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] #[doc = "## Safety"] @@ -59353,7 +57321,7 @@ pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59363,7 +57331,6 @@ pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vqdmulhq_s16(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] #[doc = "## Safety"] @@ -59386,7 +57353,7 @@ pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59399,7 +57366,6 @@ pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqdmulhq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] #[doc = "## Safety"] @@ -59422,7 +57388,7 @@ pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59432,7 +57398,6 @@ pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vqdmulh_s32(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] #[doc = "## Safety"] @@ -59455,7 +57420,7 @@ pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59468,7 +57433,6 @@ pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqdmulh_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] #[doc = "## Safety"] @@ -59491,7 +57455,7 @@ pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59501,7 +57465,6 @@ pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vqdmulhq_s32(a, b) } - #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] #[doc = "## Safety"] @@ -59524,7 +57487,7 @@ pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59537,7 +57500,6 @@ pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqdmulhq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] #[doc = "## Safety"] @@ -59565,7 +57527,6 @@ pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int3 let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] #[doc = "## Safety"] @@ -59596,7 +57557,6 @@ pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int3 let ret_val: int32x4_t = vqdmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] #[doc = "## Safety"] @@ -59624,7 +57584,6 @@ pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int6 let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) } - #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] #[doc = "## Safety"] @@ -59655,7 +57614,6 @@ pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int6 let ret_val: int64x2_t = vqdmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector saturating doubling long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] #[doc = "## Safety"] @@ -59680,7 +57638,6 @@ pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int6 pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vqdmull_s16(a, vdup_n_s16(b)) } - #[doc = "Vector saturating doubling long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] #[doc = "## Safety"] @@ -59707,7 +57664,6 @@ pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { let ret_val: int32x4_t = vqdmull_s16(a, vdup_n_s16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating doubling long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] #[doc = "## Safety"] @@ -59732,7 +57688,6 @@ pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vqdmull_s32(a, vdup_n_s32(b)) } - #[doc = "Vector saturating doubling long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] #[doc = "## Safety"] @@ -59759,7 +57714,6 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { let ret_val: int64x2_t = vqdmull_s32(a, vdup_n_s32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] #[doc = "## Safety"] @@ -59782,7 +57736,7 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59792,7 +57746,6 @@ pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } _vqdmull_s16(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] #[doc = "## Safety"] @@ -59815,7 +57768,7 @@ pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59828,7 +57781,6 @@ pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqdmull_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] #[doc = "## Safety"] @@ -59851,7 +57803,7 @@ pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59861,7 +57813,6 @@ pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { } _vqdmull_s32(a, b) } - #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] #[doc = "## Safety"] @@ -59884,7 +57835,7 @@ pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59897,7 +57848,6 @@ pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = _vqdmull_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] #[doc = "## Safety"] @@ -59920,7 +57870,7 @@ pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59930,7 +57880,6 @@ pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { } _vqmovn_s16(a) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] #[doc = "## Safety"] @@ -59953,7 +57902,7 @@ pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59965,7 +57914,6 @@ pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqmovn_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] #[doc = "## Safety"] @@ -59988,7 +57936,7 @@ pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -59998,7 +57946,6 @@ pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { } _vqmovn_s32(a) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] #[doc = "## Safety"] @@ -60021,7 +57968,7 @@ pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60033,7 +57980,6 @@ pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqmovn_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] #[doc = "## Safety"] @@ -60056,7 +58002,7 @@ pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60066,7 +58012,6 @@ pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { } _vqmovn_s64(a) } - #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] #[doc = "## Safety"] @@ -60089,7 +58034,7 @@ pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60101,7 +58046,6 @@ pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqmovn_s64(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] #[doc = "## Safety"] @@ -60124,7 +58068,7 @@ pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60134,7 +58078,6 @@ pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { } _vqmovn_u16(a.as_signed()).as_unsigned() } - #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] #[doc = "## Safety"] @@ -60157,7 +58100,7 @@ pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60169,7 +58112,6 @@ pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqmovn_u16(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] #[doc = "## Safety"] @@ -60192,7 +58134,7 @@ pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60202,7 +58144,6 @@ pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { } _vqmovn_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] #[doc = "## Safety"] @@ -60225,7 +58166,7 @@ pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60237,7 +58178,6 @@ pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqmovn_u32(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] #[doc = "## Safety"] @@ -60260,7 +58200,7 @@ pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60270,7 +58210,6 @@ pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { } _vqmovn_u64(a.as_signed()).as_unsigned() } - #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] #[doc = "## Safety"] @@ -60293,7 +58232,7 @@ pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60305,7 +58244,6 @@ pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqmovn_u64(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] #[doc = "## Safety"] @@ -60328,7 +58266,7 @@ pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60338,7 +58276,6 @@ pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { } _vqmovun_s16(a).as_unsigned() } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] #[doc = "## Safety"] @@ -60361,7 +58298,7 @@ pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60373,7 +58310,6 @@ pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqmovun_s16(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] #[doc = "## Safety"] @@ -60396,7 +58332,7 @@ pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60406,7 +58342,6 @@ pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { } _vqmovun_s32(a).as_unsigned() } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] #[doc = "## Safety"] @@ -60429,7 +58364,7 @@ pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60441,7 +58376,6 @@ pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqmovun_s32(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] #[doc = "## Safety"] @@ -60464,7 +58398,7 @@ pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60474,7 +58408,6 @@ pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { } _vqmovun_s64(a).as_unsigned() } - #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] #[doc = "## Safety"] @@ -60497,7 +58430,7 @@ pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -60509,7 +58442,6 @@ pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqmovun_s64(a).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] #[doc = "## Safety"] @@ -60532,7 +58464,7 @@ pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v8i8" @@ -60542,7 +58474,6 @@ pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { } _vqneg_s8(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] #[doc = "## Safety"] @@ -60565,7 +58496,7 @@ pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v8i8" @@ -60577,7 +58508,6 @@ pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqneg_s8(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] #[doc = "## Safety"] @@ -60600,7 +58530,7 @@ pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v16i8" @@ -60610,7 +58540,6 @@ pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { } _vqnegq_s8(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] #[doc = "## Safety"] @@ -60633,7 +58562,7 @@ pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v16i8" @@ -60649,7 +58578,6 @@ pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] #[doc = "## Safety"] @@ -60672,7 +58600,7 @@ pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v4i16" @@ -60682,7 +58610,6 @@ pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { } _vqneg_s16(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] #[doc = "## Safety"] @@ -60705,7 +58632,7 @@ pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v4i16" @@ -60717,7 +58644,6 @@ pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqneg_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] #[doc = "## Safety"] @@ -60740,7 +58666,7 @@ pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v8i16" @@ -60750,7 +58676,6 @@ pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { } _vqnegq_s16(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] #[doc = "## Safety"] @@ -60773,7 +58698,7 @@ pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v8i16" @@ -60785,7 +58710,6 @@ pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqnegq_s16(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] #[doc = "## Safety"] @@ -60808,7 +58732,7 @@ pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v2i32" @@ -60818,7 +58742,6 @@ pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { } _vqneg_s32(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] #[doc = "## Safety"] @@ -60841,7 +58764,7 @@ pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v2i32" @@ -60853,7 +58776,6 @@ pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqneg_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] #[doc = "## Safety"] @@ -60876,7 +58798,7 @@ pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v4i32" @@ -60886,7 +58808,6 @@ pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { } _vqnegq_s32(a) } - #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] #[doc = "## Safety"] @@ -60909,7 +58830,7 @@ pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v4i32" @@ -60921,7 +58842,6 @@ pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqnegq_s32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] #[doc = "## Safety"] @@ -60949,7 +58869,6 @@ pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulh_s16(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] #[doc = "## Safety"] @@ -60980,7 +58899,6 @@ pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> let ret_val: int16x4_t = vqrdmulh_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] #[doc = "## Safety"] @@ -61008,7 +58926,6 @@ pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); vqrdmulh_s32(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] #[doc = "## Safety"] @@ -61039,7 +58956,6 @@ pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> let ret_val: int32x2_t = vqrdmulh_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] #[doc = "## Safety"] @@ -61067,7 +58983,6 @@ pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulh_s16(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] #[doc = "## Safety"] @@ -61098,7 +59013,6 @@ pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> let ret_val: int16x4_t = vqrdmulh_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] #[doc = "## Safety"] @@ -61126,7 +59040,6 @@ pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); vqrdmulh_s32(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] #[doc = "## Safety"] @@ -61157,7 +59070,6 @@ pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> let ret_val: int32x2_t = vqrdmulh_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] #[doc = "## Safety"] @@ -61198,7 +59110,6 @@ pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> ); vqrdmulhq_s16(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] #[doc = "## Safety"] @@ -61242,7 +59153,6 @@ pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> let ret_val: int16x8_t = vqrdmulhq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] #[doc = "## Safety"] @@ -61270,7 +59180,6 @@ pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulhq_s32(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] #[doc = "## Safety"] @@ -61301,7 +59210,6 @@ pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> let ret_val: int32x4_t = vqrdmulhq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] #[doc = "## Safety"] @@ -61342,7 +59250,6 @@ pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) - ); vqrdmulhq_s16(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] #[doc = "## Safety"] @@ -61386,7 +59293,6 @@ pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) - let ret_val: int16x8_t = vqrdmulhq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] #[doc = "## Safety"] @@ -61414,7 +59320,6 @@ pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) - let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulhq_s32(a, b) } - #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] #[doc = "## Safety"] @@ -61445,7 +59350,6 @@ pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) - let ret_val: int32x4_t = vqrdmulhq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] #[doc = "## Safety"] @@ -61470,7 +59374,6 @@ pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) - pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { vqrdmulh_s16(a, vdup_n_s16(b)) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] #[doc = "## Safety"] @@ -61497,7 +59400,6 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { let ret_val: int16x4_t = vqrdmulh_s16(a, vdup_n_s16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] #[doc = "## Safety"] @@ -61522,7 +59424,6 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { vqrdmulhq_s16(a, vdupq_n_s16(b)) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] #[doc = "## Safety"] @@ -61549,7 +59450,6 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { let ret_val: int16x8_t = vqrdmulhq_s16(a, vdupq_n_s16(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] #[doc = "## Safety"] @@ -61574,7 +59474,6 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { vqrdmulh_s32(a, vdup_n_s32(b)) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] #[doc = "## Safety"] @@ -61601,7 +59500,6 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { let ret_val: int32x2_t = vqrdmulh_s32(a, vdup_n_s32(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] #[doc = "## Safety"] @@ -61626,7 +59524,6 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { vqrdmulhq_s32(a, vdupq_n_s32(b)) } - #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] #[doc = "## Safety"] @@ -61653,7 +59550,6 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { let ret_val: int32x4_t = vqrdmulhq_s32(a, vdupq_n_s32(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] #[doc = "## Safety"] @@ -61676,7 +59572,7 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61686,7 +59582,6 @@ pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vqrdmulh_s16(a, b) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] #[doc = "## Safety"] @@ -61709,7 +59604,7 @@ pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61722,7 +59617,6 @@ pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqrdmulh_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] #[doc = "## Safety"] @@ -61745,7 +59639,7 @@ pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61755,7 +59649,6 @@ pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vqrdmulhq_s16(a, b) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] #[doc = "## Safety"] @@ -61778,7 +59671,7 @@ pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61791,7 +59684,6 @@ pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqrdmulhq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] #[doc = "## Safety"] @@ -61814,7 +59706,7 @@ pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61824,7 +59716,6 @@ pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vqrdmulh_s32(a, b) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] #[doc = "## Safety"] @@ -61847,7 +59738,7 @@ pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61860,7 +59751,6 @@ pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqrdmulh_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] #[doc = "## Safety"] @@ -61883,7 +59773,7 @@ pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61893,7 +59783,6 @@ pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vqrdmulhq_s32(a, b) } - #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] #[doc = "## Safety"] @@ -61916,7 +59805,7 @@ pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61929,7 +59818,6 @@ pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqrdmulhq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] #[doc = "## Safety"] @@ -61952,7 +59840,7 @@ pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61962,7 +59850,6 @@ pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vqrshl_s8(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] #[doc = "## Safety"] @@ -61985,7 +59872,7 @@ pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -61998,7 +59885,6 @@ pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqrshl_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] #[doc = "## Safety"] @@ -62021,7 +59907,7 @@ pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62031,7 +59917,6 @@ pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vqrshlq_s8(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] #[doc = "## Safety"] @@ -62054,7 +59939,7 @@ pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62071,7 +59956,6 @@ pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] #[doc = "## Safety"] @@ -62094,7 +59978,7 @@ pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62104,7 +59988,6 @@ pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vqrshl_s16(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] #[doc = "## Safety"] @@ -62127,7 +60010,7 @@ pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62140,7 +60023,6 @@ pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqrshl_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] #[doc = "## Safety"] @@ -62163,7 +60045,7 @@ pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62173,7 +60055,6 @@ pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vqrshlq_s16(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] #[doc = "## Safety"] @@ -62196,7 +60077,7 @@ pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62209,7 +60090,6 @@ pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqrshlq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] #[doc = "## Safety"] @@ -62232,7 +60112,7 @@ pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62242,7 +60122,6 @@ pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vqrshl_s32(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] #[doc = "## Safety"] @@ -62265,7 +60144,7 @@ pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62278,7 +60157,6 @@ pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqrshl_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] #[doc = "## Safety"] @@ -62301,7 +60179,7 @@ pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62311,7 +60189,6 @@ pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vqrshlq_s32(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] #[doc = "## Safety"] @@ -62334,7 +60211,7 @@ pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62347,7 +60224,6 @@ pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqrshlq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"] #[doc = "## Safety"] @@ -62369,7 +60245,7 @@ pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62379,7 +60255,6 @@ pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } _vqrshl_s64(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] #[doc = "## Safety"] @@ -62402,7 +60277,7 @@ pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62412,7 +60287,6 @@ pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } _vqrshlq_s64(a, b) } - #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] #[doc = "## Safety"] @@ -62435,7 +60309,7 @@ pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62448,7 +60322,6 @@ pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vqrshlq_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] #[doc = "## Safety"] @@ -62471,7 +60344,7 @@ pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62481,7 +60354,6 @@ pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } _vqrshl_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] #[doc = "## Safety"] @@ -62504,7 +60376,7 @@ pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62517,7 +60389,6 @@ pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqrshl_u8(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] #[doc = "## Safety"] @@ -62540,7 +60411,7 @@ pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62550,7 +60421,6 @@ pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } _vqrshlq_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] #[doc = "## Safety"] @@ -62573,7 +60443,7 @@ pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62590,7 +60460,6 @@ pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] #[doc = "## Safety"] @@ -62613,7 +60482,7 @@ pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62623,7 +60492,6 @@ pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } _vqrshl_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] #[doc = "## Safety"] @@ -62646,7 +60514,7 @@ pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62659,7 +60527,6 @@ pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqrshl_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] #[doc = "## Safety"] @@ -62682,7 +60549,7 @@ pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62692,7 +60559,6 @@ pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } _vqrshlq_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] #[doc = "## Safety"] @@ -62715,7 +60581,7 @@ pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62728,7 +60594,6 @@ pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vqrshlq_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] #[doc = "## Safety"] @@ -62751,7 +60616,7 @@ pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62761,7 +60626,6 @@ pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } _vqrshl_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] #[doc = "## Safety"] @@ -62784,7 +60648,7 @@ pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62797,7 +60661,6 @@ pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqrshl_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] #[doc = "## Safety"] @@ -62820,7 +60683,7 @@ pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62830,7 +60693,6 @@ pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } _vqrshlq_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] #[doc = "## Safety"] @@ -62853,7 +60715,7 @@ pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62866,7 +60728,6 @@ pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vqrshlq_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] #[doc = "## Safety"] @@ -62888,7 +60749,7 @@ pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62898,7 +60759,6 @@ pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } _vqrshl_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] #[doc = "## Safety"] @@ -62921,7 +60781,7 @@ pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62931,7 +60791,6 @@ pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } _vqrshlq_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] #[doc = "## Safety"] @@ -62954,7 +60813,7 @@ pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -62967,7 +60826,6 @@ pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vqrshlq_u64(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] #[doc = "## Safety"] @@ -62981,7 +60839,7 @@ pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -62995,7 +60853,6 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { }, ) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] #[doc = "## Safety"] @@ -63009,7 +60866,7 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -63025,7 +60882,6 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] #[doc = "## Safety"] @@ -63039,7 +60895,7 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -63048,7 +60904,6 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, ) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] #[doc = "## Safety"] @@ -63062,7 +60917,7 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -63073,7 +60928,6 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] #[doc = "## Safety"] @@ -63087,13 +60941,12 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] #[doc = "## Safety"] @@ -63107,7 +60960,7 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -63115,7 +60968,6 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] #[doc = "## Safety"] @@ -63129,7 +60981,7 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v8i8" @@ -63138,7 +60990,6 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { } _vqrshrn_n_s16(a, N) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] #[doc = "## Safety"] @@ -63152,7 +61003,7 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v8i8" @@ -63163,7 +61014,6 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqrshrn_n_s16(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] #[doc = "## Safety"] @@ -63177,7 +61027,7 @@ pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v4i16" @@ -63186,7 +61036,6 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { } _vqrshrn_n_s32(a, N) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] #[doc = "## Safety"] @@ -63200,7 +61049,7 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v4i16" @@ -63211,7 +61060,6 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqrshrn_n_s32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] #[doc = "## Safety"] @@ -63225,7 +61073,7 @@ pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v2i32" @@ -63234,7 +61082,6 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { } _vqrshrn_n_s64(a, N) } - #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] #[doc = "## Safety"] @@ -63248,7 +61095,7 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrn.v2i32" @@ -63259,7 +61106,6 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqrshrn_n_s64(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] #[doc = "## Safety"] @@ -63273,7 +61119,7 @@ pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -63289,7 +61135,6 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { ) .as_unsigned() } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] #[doc = "## Safety"] @@ -63303,7 +61148,7 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -63321,7 +61166,6 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] #[doc = "## Safety"] @@ -63335,7 +61179,7 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -63345,7 +61189,6 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { ) .as_unsigned() } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] #[doc = "## Safety"] @@ -63359,7 +61202,7 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -63371,7 +61214,6 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] #[doc = "## Safety"] @@ -63385,7 +61227,7 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -63395,7 +61237,6 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { ) .as_unsigned() } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] #[doc = "## Safety"] @@ -63409,7 +61250,7 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -63421,7 +61262,6 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] #[doc = "## Safety"] @@ -63435,7 +61275,7 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v8i8" @@ -63444,7 +61284,6 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } _vqrshrn_n_u16(a.as_signed(), N).as_unsigned() } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] #[doc = "## Safety"] @@ -63458,7 +61297,7 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v8i8" @@ -63469,7 +61308,6 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqrshrn_n_u16(a.as_signed(), N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] #[doc = "## Safety"] @@ -63483,7 +61321,7 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v4i16" @@ -63492,7 +61330,6 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } _vqrshrn_n_u32(a.as_signed(), N).as_unsigned() } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] #[doc = "## Safety"] @@ -63506,7 +61343,7 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v4i16" @@ -63517,7 +61354,6 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqrshrn_n_u32(a.as_signed(), N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] #[doc = "## Safety"] @@ -63531,7 +61367,7 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v2i32" @@ -63540,7 +61376,6 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } _vqrshrn_n_u64(a.as_signed(), N).as_unsigned() } - #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] #[doc = "## Safety"] @@ -63554,7 +61389,7 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v2i32" @@ -63565,7 +61400,6 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqrshrn_n_u64(a.as_signed(), N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] #[doc = "## Safety"] @@ -63579,7 +61413,7 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -63594,7 +61428,6 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { ) .as_unsigned() } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] #[doc = "## Safety"] @@ -63608,7 +61441,7 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -63625,7 +61458,6 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] #[doc = "## Safety"] @@ -63639,7 +61471,7 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -63649,7 +61481,6 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { ) .as_unsigned() } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] #[doc = "## Safety"] @@ -63663,7 +61494,7 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -63675,7 +61506,6 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] #[doc = "## Safety"] @@ -63689,13 +61519,12 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] #[doc = "## Safety"] @@ -63709,7 +61538,7 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -63718,7 +61547,6 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] #[doc = "## Safety"] @@ -63732,7 +61560,7 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v8i8" @@ -63741,7 +61569,6 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { } _vqrshrun_n_s16(a, N).as_unsigned() } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] #[doc = "## Safety"] @@ -63755,7 +61582,7 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v8i8" @@ -63766,7 +61593,6 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqrshrun_n_s16(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] #[doc = "## Safety"] @@ -63780,7 +61606,7 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v4i16" @@ -63789,7 +61615,6 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { } _vqrshrun_n_s32(a, N).as_unsigned() } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] #[doc = "## Safety"] @@ -63803,7 +61628,7 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v4i16" @@ -63814,7 +61639,6 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqrshrun_n_s32(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] #[doc = "## Safety"] @@ -63828,7 +61652,7 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v2i32" @@ -63837,7 +61661,6 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { } _vqrshrun_n_s64(a, N).as_unsigned() } - #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] #[doc = "## Safety"] @@ -63851,7 +61674,7 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v2i32" @@ -63862,7 +61685,6 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqrshrun_n_s64(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] #[doc = "## Safety"] @@ -63889,7 +61711,6 @@ pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); vqshl_s8(a, vdup_n_s8(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] #[doc = "## Safety"] @@ -63918,7 +61739,6 @@ pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vqshl_s8(a, vdup_n_s8(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] #[doc = "## Safety"] @@ -63945,7 +61765,6 @@ pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); vqshlq_s8(a, vdupq_n_s8(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] #[doc = "## Safety"] @@ -63978,7 +61797,6 @@ pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] #[doc = "## Safety"] @@ -64005,7 +61823,6 @@ pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 4); vqshl_s16(a, vdup_n_s16(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] #[doc = "## Safety"] @@ -64034,7 +61851,6 @@ pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = vqshl_s16(a, vdup_n_s16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] #[doc = "## Safety"] @@ -64061,7 +61877,6 @@ pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 4); vqshlq_s16(a, vdupq_n_s16(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] #[doc = "## Safety"] @@ -64090,7 +61905,6 @@ pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = vqshlq_s16(a, vdupq_n_s16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] #[doc = "## Safety"] @@ -64117,7 +61931,6 @@ pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 5); vqshl_s32(a, vdup_n_s32(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] #[doc = "## Safety"] @@ -64146,7 +61959,6 @@ pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = vqshl_s32(a, vdup_n_s32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] #[doc = "## Safety"] @@ -64173,7 +61985,6 @@ pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 5); vqshlq_s32(a, vdupq_n_s32(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] #[doc = "## Safety"] @@ -64202,7 +62013,6 @@ pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = vqshlq_s32(a, vdupq_n_s32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"] #[doc = "## Safety"] @@ -64228,7 +62038,6 @@ pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { static_assert_uimm_bits!(N, 6); vqshl_s64(a, vdup_n_s64(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] #[doc = "## Safety"] @@ -64255,7 +62064,6 @@ pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 6); vqshlq_s64(a, vdupq_n_s64(N as _)) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] #[doc = "## Safety"] @@ -64284,7 +62092,6 @@ pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = vqshlq_s64(a, vdupq_n_s64(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] #[doc = "## Safety"] @@ -64311,7 +62118,6 @@ pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); vqshl_u8(a, vdup_n_s8(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] #[doc = "## Safety"] @@ -64340,7 +62146,6 @@ pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = vqshl_u8(a, vdup_n_s8(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] #[doc = "## Safety"] @@ -64367,7 +62172,6 @@ pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); vqshlq_u8(a, vdupq_n_s8(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] #[doc = "## Safety"] @@ -64400,7 +62204,6 @@ pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] #[doc = "## Safety"] @@ -64427,7 +62230,6 @@ pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); vqshl_u16(a, vdup_n_s16(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] #[doc = "## Safety"] @@ -64456,7 +62258,6 @@ pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = vqshl_u16(a, vdup_n_s16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] #[doc = "## Safety"] @@ -64483,7 +62284,6 @@ pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); vqshlq_u16(a, vdupq_n_s16(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] #[doc = "## Safety"] @@ -64512,7 +62312,6 @@ pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = vqshlq_u16(a, vdupq_n_s16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] #[doc = "## Safety"] @@ -64539,7 +62338,6 @@ pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); vqshl_u32(a, vdup_n_s32(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] #[doc = "## Safety"] @@ -64568,7 +62366,6 @@ pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = vqshl_u32(a, vdup_n_s32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] #[doc = "## Safety"] @@ -64595,7 +62392,6 @@ pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); vqshlq_u32(a, vdupq_n_s32(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] #[doc = "## Safety"] @@ -64624,7 +62420,6 @@ pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = vqshlq_u32(a, vdupq_n_s32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"] #[doc = "## Safety"] @@ -64650,7 +62445,6 @@ pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); vqshl_u64(a, vdup_n_s64(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] #[doc = "## Safety"] @@ -64677,7 +62471,6 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); vqshlq_u64(a, vdupq_n_s64(N as _)) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] #[doc = "## Safety"] @@ -64706,7 +62499,6 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = vqshlq_u64(a, vdupq_n_s64(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] #[doc = "## Safety"] @@ -64729,7 +62521,7 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64739,7 +62531,6 @@ pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vqshl_s8(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] #[doc = "## Safety"] @@ -64762,7 +62553,7 @@ pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64775,7 +62566,6 @@ pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqshl_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] #[doc = "## Safety"] @@ -64798,7 +62588,7 @@ pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64808,7 +62598,6 @@ pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vqshlq_s8(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] #[doc = "## Safety"] @@ -64831,7 +62620,7 @@ pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64848,7 +62637,6 @@ pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] #[doc = "## Safety"] @@ -64871,7 +62659,7 @@ pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64881,7 +62669,6 @@ pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vqshl_s16(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] #[doc = "## Safety"] @@ -64904,7 +62691,7 @@ pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64917,7 +62704,6 @@ pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqshl_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] #[doc = "## Safety"] @@ -64940,7 +62726,7 @@ pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64950,7 +62736,6 @@ pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vqshlq_s16(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] #[doc = "## Safety"] @@ -64973,7 +62758,7 @@ pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -64986,7 +62771,6 @@ pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqshlq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] #[doc = "## Safety"] @@ -65009,7 +62793,7 @@ pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65019,7 +62803,6 @@ pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vqshl_s32(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] #[doc = "## Safety"] @@ -65042,7 +62825,7 @@ pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65055,7 +62838,6 @@ pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqshl_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] #[doc = "## Safety"] @@ -65078,7 +62860,7 @@ pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65088,7 +62870,6 @@ pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vqshlq_s32(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] #[doc = "## Safety"] @@ -65111,7 +62892,7 @@ pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65124,7 +62905,6 @@ pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqshlq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"] #[doc = "## Safety"] @@ -65146,7 +62926,7 @@ pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65156,7 +62936,6 @@ pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } _vqshl_s64(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] #[doc = "## Safety"] @@ -65179,7 +62958,7 @@ pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65189,7 +62968,6 @@ pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } _vqshlq_s64(a, b) } - #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] #[doc = "## Safety"] @@ -65212,7 +62990,7 @@ pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65225,7 +63003,6 @@ pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vqshlq_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] #[doc = "## Safety"] @@ -65248,7 +63025,7 @@ pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65258,7 +63035,6 @@ pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } _vqshl_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] #[doc = "## Safety"] @@ -65281,7 +63057,7 @@ pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65294,7 +63070,6 @@ pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqshl_u8(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] #[doc = "## Safety"] @@ -65317,7 +63092,7 @@ pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65327,7 +63102,6 @@ pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } _vqshlq_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] #[doc = "## Safety"] @@ -65350,7 +63124,7 @@ pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65367,7 +63141,6 @@ pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] #[doc = "## Safety"] @@ -65390,7 +63163,7 @@ pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65400,7 +63173,6 @@ pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } _vqshl_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] #[doc = "## Safety"] @@ -65423,7 +63195,7 @@ pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65436,7 +63208,6 @@ pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqshl_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] #[doc = "## Safety"] @@ -65459,7 +63230,7 @@ pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65469,7 +63240,6 @@ pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } _vqshlq_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] #[doc = "## Safety"] @@ -65492,7 +63262,7 @@ pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65505,7 +63275,6 @@ pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vqshlq_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] #[doc = "## Safety"] @@ -65528,7 +63297,7 @@ pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65538,7 +63307,6 @@ pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } _vqshl_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] #[doc = "## Safety"] @@ -65561,7 +63329,7 @@ pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65574,7 +63342,6 @@ pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqshl_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] #[doc = "## Safety"] @@ -65597,7 +63364,7 @@ pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65607,7 +63374,6 @@ pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } _vqshlq_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] #[doc = "## Safety"] @@ -65630,7 +63396,7 @@ pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65643,7 +63409,6 @@ pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vqshlq_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] #[doc = "## Safety"] @@ -65665,7 +63430,7 @@ pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65675,7 +63440,6 @@ pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } _vqshl_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] #[doc = "## Safety"] @@ -65698,7 +63462,7 @@ pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65708,7 +63472,6 @@ pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } _vqshlq_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] #[doc = "## Safety"] @@ -65731,7 +63494,7 @@ pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -65744,7 +63507,6 @@ pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vqshlq_u64(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] #[doc = "## Safety"] @@ -65758,7 +63520,7 @@ pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; } @@ -65772,7 +63534,6 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] #[doc = "## Safety"] @@ -65786,7 +63547,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; } @@ -65802,7 +63563,6 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] #[doc = "## Safety"] @@ -65816,7 +63576,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; } @@ -65831,7 +63591,6 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] #[doc = "## Safety"] @@ -65845,7 +63604,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; } @@ -65866,7 +63625,6 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] #[doc = "## Safety"] @@ -65880,7 +63638,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; } @@ -65890,7 +63648,6 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] #[doc = "## Safety"] @@ -65904,7 +63661,7 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; } @@ -65916,7 +63673,6 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] #[doc = "## Safety"] @@ -65930,7 +63686,7 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; } @@ -65944,7 +63700,6 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] #[doc = "## Safety"] @@ -65958,7 +63713,7 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; } @@ -65974,7 +63729,6 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] #[doc = "## Safety"] @@ -65988,13 +63742,12 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; } _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] #[doc = "## Safety"] @@ -66008,7 +63761,7 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; } @@ -66017,7 +63770,6 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] #[doc = "## Safety"] @@ -66031,7 +63783,7 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; } @@ -66041,7 +63793,6 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] #[doc = "## Safety"] @@ -66055,7 +63806,7 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; } @@ -66067,7 +63818,6 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] #[doc = "## Safety"] @@ -66080,13 +63830,12 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; } _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] #[doc = "## Safety"] @@ -66100,13 +63849,12 @@ pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; } _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] #[doc = "## Safety"] @@ -66120,7 +63868,7 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; } @@ -66129,7 +63877,6 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] #[doc = "## Safety"] @@ -66143,7 +63890,7 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i8" @@ -66160,7 +63907,6 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] #[doc = "## Safety"] @@ -66174,7 +63920,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i8" @@ -66193,7 +63939,6 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] #[doc = "## Safety"] @@ -66207,7 +63952,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v16i8" @@ -66225,7 +63970,6 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] #[doc = "## Safety"] @@ -66239,7 +63983,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v16i8" @@ -66263,7 +64007,6 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] #[doc = "## Safety"] @@ -66277,7 +64020,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i16" @@ -66290,7 +64033,6 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] #[doc = "## Safety"] @@ -66304,7 +64046,7 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i16" @@ -66319,7 +64061,6 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] #[doc = "## Safety"] @@ -66333,7 +64074,7 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i16" @@ -66350,7 +64091,6 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] #[doc = "## Safety"] @@ -66364,7 +64104,7 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i16" @@ -66383,7 +64123,6 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] #[doc = "## Safety"] @@ -66397,7 +64136,7 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i32" @@ -66406,7 +64145,6 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { } _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] #[doc = "## Safety"] @@ -66420,7 +64158,7 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i32" @@ -66432,7 +64170,6 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] #[doc = "## Safety"] @@ -66446,7 +64183,7 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i32" @@ -66459,7 +64196,6 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { ) .as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] #[doc = "## Safety"] @@ -66473,7 +64209,7 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i32" @@ -66488,7 +64224,6 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] #[doc = "## Safety"] @@ -66501,7 +64236,7 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v1i64" @@ -66510,7 +64245,6 @@ pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { } _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] #[doc = "## Safety"] @@ -66524,7 +64258,7 @@ pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i64" @@ -66533,7 +64267,6 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { } _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() } - #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] #[doc = "## Safety"] @@ -66547,7 +64280,7 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i64" @@ -66559,7 +64292,6 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] #[doc = "## Safety"] @@ -66573,7 +64305,7 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -66587,7 +64319,6 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { }, ) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] #[doc = "## Safety"] @@ -66601,7 +64332,7 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -66617,7 +64348,6 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] #[doc = "## Safety"] @@ -66631,7 +64361,7 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -66640,7 +64370,6 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, ) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] #[doc = "## Safety"] @@ -66654,7 +64383,7 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -66665,7 +64394,6 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] #[doc = "## Safety"] @@ -66679,13 +64407,12 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] #[doc = "## Safety"] @@ -66699,7 +64426,7 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -66707,7 +64434,6 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] #[doc = "## Safety"] @@ -66721,7 +64447,7 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v8i8" @@ -66730,7 +64456,6 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { } _vqshrn_n_s16(a, N) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] #[doc = "## Safety"] @@ -66744,7 +64469,7 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v8i8" @@ -66755,7 +64480,6 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqshrn_n_s16(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] #[doc = "## Safety"] @@ -66769,7 +64493,7 @@ pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v4i16" @@ -66778,7 +64502,6 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { } _vqshrn_n_s32(a, N) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] #[doc = "## Safety"] @@ -66792,7 +64515,7 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v4i16" @@ -66803,7 +64526,6 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqshrn_n_s32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] #[doc = "## Safety"] @@ -66817,7 +64539,7 @@ pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v2i32" @@ -66826,7 +64548,6 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { } _vqshrn_n_s64(a, N) } - #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] #[doc = "## Safety"] @@ -66840,7 +64561,7 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.v2i32" @@ -66851,7 +64572,6 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqshrn_n_s64(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] #[doc = "## Safety"] @@ -66865,7 +64585,7 @@ pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -66881,7 +64601,6 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { ) .as_unsigned() } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] #[doc = "## Safety"] @@ -66895,7 +64614,7 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -66913,7 +64632,6 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] #[doc = "## Safety"] @@ -66927,7 +64645,7 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -66937,7 +64655,6 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { ) .as_unsigned() } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] #[doc = "## Safety"] @@ -66951,7 +64668,7 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -66963,7 +64680,6 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] #[doc = "## Safety"] @@ -66977,7 +64693,7 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -66987,7 +64703,6 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { ) .as_unsigned() } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] #[doc = "## Safety"] @@ -67001,7 +64716,7 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -67013,7 +64728,6 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] #[doc = "## Safety"] @@ -67027,7 +64741,7 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v8i8" @@ -67036,7 +64750,6 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } _vqshrn_n_u16(a.as_signed(), N).as_unsigned() } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] #[doc = "## Safety"] @@ -67050,7 +64763,7 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v8i8" @@ -67061,7 +64774,6 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqshrn_n_u16(a.as_signed(), N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] #[doc = "## Safety"] @@ -67075,7 +64787,7 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v4i16" @@ -67084,7 +64796,6 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } _vqshrn_n_u32(a.as_signed(), N).as_unsigned() } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] #[doc = "## Safety"] @@ -67098,7 +64809,7 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v4i16" @@ -67109,7 +64820,6 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqshrn_n_u32(a.as_signed(), N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] #[doc = "## Safety"] @@ -67123,7 +64833,7 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v2i32" @@ -67132,7 +64842,6 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } _vqshrn_n_u64(a.as_signed(), N).as_unsigned() } - #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] #[doc = "## Safety"] @@ -67146,7 +64855,7 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v2i32" @@ -67157,7 +64866,6 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqshrn_n_u64(a.as_signed(), N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[doc = "## Safety"] @@ -67171,7 +64879,7 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -67186,7 +64894,6 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { ) .as_unsigned() } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[doc = "## Safety"] @@ -67200,7 +64907,7 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -67217,7 +64924,6 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[doc = "## Safety"] @@ -67231,7 +64937,7 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -67241,7 +64947,6 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { ) .as_unsigned() } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[doc = "## Safety"] @@ -67255,7 +64960,7 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -67267,7 +64972,6 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { .as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[doc = "## Safety"] @@ -67281,13 +64985,12 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[doc = "## Safety"] @@ -67301,7 +65004,7 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -67310,7 +65013,6 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[doc = "## Safety"] @@ -67324,7 +65026,7 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v8i8" @@ -67333,7 +65035,6 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { } _vqshrun_n_s16(a, N).as_unsigned() } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[doc = "## Safety"] @@ -67347,7 +65048,7 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v8i8" @@ -67358,7 +65059,6 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqshrun_n_s16(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[doc = "## Safety"] @@ -67372,7 +65072,7 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v4i16" @@ -67381,7 +65081,6 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { } _vqshrun_n_s32(a, N).as_unsigned() } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[doc = "## Safety"] @@ -67395,7 +65094,7 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v4i16" @@ -67406,7 +65105,6 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqshrun_n_s32(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[doc = "## Safety"] @@ -67420,7 +65118,7 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v2i32" @@ -67429,7 +65127,6 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { } _vqshrun_n_s64(a, N).as_unsigned() } - #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[doc = "## Safety"] @@ -67443,7 +65140,7 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v2i32" @@ -67454,7 +65151,6 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqshrun_n_s64(a, N).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] #[doc = "## Safety"] @@ -67477,7 +65173,7 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i8" @@ -67487,7 +65183,6 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vqsub_s8(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] #[doc = "## Safety"] @@ -67510,7 +65205,7 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i8" @@ -67523,7 +65218,6 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vqsub_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] #[doc = "## Safety"] @@ -67546,7 +65240,7 @@ pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v16i8" @@ -67556,7 +65250,6 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vqsubq_s8(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] #[doc = "## Safety"] @@ -67579,7 +65272,7 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v16i8" @@ -67596,7 +65289,6 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] #[doc = "## Safety"] @@ -67619,7 +65311,7 @@ pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i16" @@ -67629,7 +65321,6 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vqsub_s16(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] #[doc = "## Safety"] @@ -67652,7 +65343,7 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i16" @@ -67665,7 +65356,6 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vqsub_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] #[doc = "## Safety"] @@ -67688,7 +65378,7 @@ pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i16" @@ -67698,7 +65388,6 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vqsubq_s16(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] #[doc = "## Safety"] @@ -67721,7 +65410,7 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v8i16" @@ -67734,7 +65423,6 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vqsubq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] #[doc = "## Safety"] @@ -67757,7 +65445,7 @@ pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i32" @@ -67767,7 +65455,6 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vqsub_s32(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] #[doc = "## Safety"] @@ -67790,7 +65477,7 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i32" @@ -67803,7 +65490,6 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vqsub_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] #[doc = "## Safety"] @@ -67826,7 +65512,7 @@ pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i32" @@ -67836,7 +65522,6 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vqsubq_s32(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] #[doc = "## Safety"] @@ -67859,7 +65544,7 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v4i32" @@ -67872,7 +65557,6 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vqsubq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] #[doc = "## Safety"] @@ -67894,7 +65578,7 @@ pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v1i64" @@ -67904,7 +65588,6 @@ pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } _vqsub_s64(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] #[doc = "## Safety"] @@ -67927,7 +65610,7 @@ pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i64" @@ -67937,7 +65620,6 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } _vqsubq_s64(a, b) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] #[doc = "## Safety"] @@ -67960,7 +65642,7 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.v2i64" @@ -67973,7 +65655,6 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vqsubq_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] #[doc = "## Safety"] @@ -67996,7 +65677,7 @@ pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i8" @@ -68006,7 +65687,6 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] #[doc = "## Safety"] @@ -68029,7 +65709,7 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i8" @@ -68042,7 +65722,6 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] #[doc = "## Safety"] @@ -68065,7 +65744,7 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v16i8" @@ -68075,7 +65754,6 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] #[doc = "## Safety"] @@ -68098,7 +65776,7 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v16i8" @@ -68115,7 +65793,6 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] #[doc = "## Safety"] @@ -68138,7 +65815,7 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i16" @@ -68148,7 +65825,6 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] #[doc = "## Safety"] @@ -68171,7 +65847,7 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i16" @@ -68184,7 +65860,6 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] #[doc = "## Safety"] @@ -68207,7 +65882,7 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i16" @@ -68217,7 +65892,6 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] #[doc = "## Safety"] @@ -68240,7 +65914,7 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v8i16" @@ -68253,7 +65927,6 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] #[doc = "## Safety"] @@ -68276,7 +65949,7 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i32" @@ -68286,7 +65959,6 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] #[doc = "## Safety"] @@ -68309,7 +65981,7 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i32" @@ -68322,7 +65994,6 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] #[doc = "## Safety"] @@ -68345,7 +66016,7 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i32" @@ -68355,7 +66026,6 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] #[doc = "## Safety"] @@ -68378,7 +66048,7 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v4i32" @@ -68391,7 +66061,6 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] #[doc = "## Safety"] @@ -68413,7 +66082,7 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v1i64" @@ -68423,7 +66092,6 @@ pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } _vqsub_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] #[doc = "## Safety"] @@ -68446,7 +66114,7 @@ pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i64" @@ -68456,7 +66124,6 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] #[doc = "## Safety"] @@ -68479,7 +66146,7 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.v2i64" @@ -68492,7 +66159,6 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] #[doc = "## Safety"] @@ -68518,7 +66184,6 @@ pub unsafe fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x let x = vraddhn_s16(b, c); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] #[doc = "## Safety"] @@ -68553,7 +66218,6 @@ pub unsafe fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] #[doc = "## Safety"] @@ -68579,7 +66243,6 @@ pub unsafe fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int1 let x = vraddhn_s32(b, c); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] #[doc = "## Safety"] @@ -68609,7 +66272,6 @@ pub unsafe fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int1 let ret_val: int16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] #[doc = "## Safety"] @@ -68635,7 +66297,6 @@ pub unsafe fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int3 let x = vraddhn_s64(b, c); simd_shuffle!(a, x, [0, 1, 2, 3]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] #[doc = "## Safety"] @@ -68665,7 +66326,6 @@ pub unsafe fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int3 let ret_val: int32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] #[doc = "## Safety"] @@ -68691,7 +66351,6 @@ pub unsafe fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> ui let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] #[doc = "## Safety"] @@ -68726,7 +66385,6 @@ pub unsafe fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> ui [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] #[doc = "## Safety"] @@ -68752,7 +66410,6 @@ pub unsafe fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> u let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] #[doc = "## Safety"] @@ -68782,7 +66439,6 @@ pub unsafe fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> u let ret_val: uint16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] #[doc = "## Safety"] @@ -68808,7 +66464,6 @@ pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); simd_shuffle!(a, x, [0, 1, 2, 3]) } - #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] #[doc = "## Safety"] @@ -68838,7 +66493,6 @@ pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u let ret_val: uint32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] #[doc = "## Safety"] @@ -68861,7 +66515,7 @@ pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.raddhn.v8i8" @@ -68871,7 +66525,6 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { } _vraddhn_s16(a, b) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] #[doc = "## Safety"] @@ -68894,7 +66547,7 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.raddhn.v8i8" @@ -68907,7 +66560,6 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = _vraddhn_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] #[doc = "## Safety"] @@ -68930,7 +66582,7 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.raddhn.v4i16" @@ -68940,7 +66592,6 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { } _vraddhn_s32(a, b) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] #[doc = "## Safety"] @@ -68963,7 +66614,7 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.raddhn.v4i16" @@ -68976,7 +66627,6 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = _vraddhn_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] #[doc = "## Safety"] @@ -68999,7 +66649,7 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.raddhn.v2i32" @@ -69009,7 +66659,6 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { } _vraddhn_s64(a, b) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] #[doc = "## Safety"] @@ -69032,7 +66681,7 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.raddhn.v2i32" @@ -69045,7 +66694,6 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vraddhn_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] #[doc = "## Safety"] @@ -69070,7 +66718,6 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vraddhn_s16(transmute(a), transmute(b))) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] #[doc = "## Safety"] @@ -69098,7 +66745,6 @@ pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vraddhn_s16(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] #[doc = "## Safety"] @@ -69123,7 +66769,6 @@ pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vraddhn_s32(transmute(a), transmute(b))) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] #[doc = "## Safety"] @@ -69151,7 +66796,6 @@ pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(vraddhn_s32(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] #[doc = "## Safety"] @@ -69176,7 +66820,6 @@ pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vraddhn_s64(transmute(a), transmute(b))) } - #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] #[doc = "## Safety"] @@ -69204,7 +66847,6 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(vraddhn_s64(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] #[doc = "## Safety"] @@ -69227,7 +66869,7 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69237,7 +66879,6 @@ pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { } _vrecpe_f32(a) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] #[doc = "## Safety"] @@ -69260,7 +66901,7 @@ pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69272,7 +66913,6 @@ pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrecpe_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] #[doc = "## Safety"] @@ -69295,7 +66935,7 @@ pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69305,7 +66945,6 @@ pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { } _vrecpeq_f32(a) } - #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] #[doc = "## Safety"] @@ -69328,7 +66967,7 @@ pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69340,7 +66979,6 @@ pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrecpeq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] #[doc = "## Safety"] @@ -69363,7 +67001,7 @@ pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69373,7 +67011,6 @@ pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { } _vrecpe_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] #[doc = "## Safety"] @@ -69396,7 +67033,7 @@ pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69408,7 +67045,6 @@ pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vrecpe_u32(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] #[doc = "## Safety"] @@ -69431,7 +67067,7 @@ pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69441,7 +67077,6 @@ pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { } _vrecpeq_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] #[doc = "## Safety"] @@ -69464,7 +67099,7 @@ pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69476,7 +67111,6 @@ pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vrecpeq_u32(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] #[doc = "## Safety"] @@ -69499,7 +67133,7 @@ pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69509,7 +67143,6 @@ pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vrecps_f32(a, b) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] #[doc = "## Safety"] @@ -69532,7 +67165,7 @@ pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69545,7 +67178,6 @@ pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrecps_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] #[doc = "## Safety"] @@ -69568,7 +67200,7 @@ pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69578,7 +67210,6 @@ pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vrecpsq_f32(a, b) } - #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] #[doc = "## Safety"] @@ -69601,7 +67232,7 @@ pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -69614,7 +67245,6 @@ pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrecpsq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] #[doc = "## Safety"] @@ -69639,7 +67269,6 @@ pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] #[doc = "## Safety"] @@ -69665,7 +67294,6 @@ pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] #[doc = "## Safety"] @@ -69690,7 +67318,6 @@ pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] #[doc = "## Safety"] @@ -69717,7 +67344,6 @@ pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] #[doc = "## Safety"] @@ -69742,7 +67368,6 @@ pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] #[doc = "## Safety"] @@ -69769,7 +67394,6 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] #[doc = "## Safety"] @@ -69794,7 +67418,6 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] #[doc = "## Safety"] @@ -69821,7 +67444,6 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] #[doc = "## Safety"] @@ -69846,7 +67468,6 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] #[doc = "## Safety"] @@ -69872,7 +67493,6 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] #[doc = "## Safety"] @@ -69897,7 +67517,6 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] #[doc = "## Safety"] @@ -69924,7 +67543,6 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] #[doc = "## Safety"] @@ -69949,7 +67567,6 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] #[doc = "## Safety"] @@ -69976,7 +67593,6 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] #[doc = "## Safety"] @@ -70001,7 +67617,6 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] #[doc = "## Safety"] @@ -70028,7 +67643,6 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] #[doc = "## Safety"] @@ -70053,7 +67667,6 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] #[doc = "## Safety"] @@ -70079,7 +67692,6 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] #[doc = "## Safety"] @@ -70104,7 +67716,6 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] #[doc = "## Safety"] @@ -70131,7 +67742,6 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] #[doc = "## Safety"] @@ -70156,7 +67766,6 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] #[doc = "## Safety"] @@ -70183,7 +67792,6 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] #[doc = "## Safety"] @@ -70208,7 +67816,6 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] #[doc = "## Safety"] @@ -70234,7 +67841,6 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] #[doc = "## Safety"] @@ -70259,7 +67865,6 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] #[doc = "## Safety"] @@ -70290,7 +67895,6 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] #[doc = "## Safety"] @@ -70315,7 +67919,6 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] #[doc = "## Safety"] @@ -70342,7 +67945,6 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] #[doc = "## Safety"] @@ -70367,7 +67969,6 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] #[doc = "## Safety"] @@ -70394,7 +67995,6 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] #[doc = "## Safety"] @@ -70419,7 +68019,6 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] #[doc = "## Safety"] @@ -70446,7 +68045,6 @@ pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] #[doc = "## Safety"] @@ -70471,7 +68069,6 @@ pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] #[doc = "## Safety"] @@ -70502,7 +68099,6 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] #[doc = "## Safety"] @@ -70527,7 +68123,6 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] #[doc = "## Safety"] @@ -70554,7 +68149,6 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] #[doc = "## Safety"] @@ -70579,7 +68173,6 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] #[doc = "## Safety"] @@ -70606,7 +68199,6 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[doc = "## Safety"] @@ -70631,7 +68223,6 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[doc = "## Safety"] @@ -70658,7 +68249,6 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[doc = "## Safety"] @@ -70683,7 +68273,6 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[doc = "## Safety"] @@ -70714,7 +68303,6 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[doc = "## Safety"] @@ -70739,7 +68327,6 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[doc = "## Safety"] @@ -70766,7 +68353,6 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[doc = "## Safety"] @@ -70791,7 +68377,6 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[doc = "## Safety"] @@ -70818,7 +68403,6 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[doc = "## Safety"] @@ -70843,7 +68427,6 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[doc = "## Safety"] @@ -70870,7 +68453,6 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[doc = "## Safety"] @@ -70895,7 +68477,6 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[doc = "## Safety"] @@ -70922,7 +68503,6 @@ pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[doc = "## Safety"] @@ -70947,7 +68527,6 @@ pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[doc = "## Safety"] @@ -70973,7 +68552,6 @@ pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] #[doc = "## Safety"] @@ -70998,7 +68576,6 @@ pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] #[doc = "## Safety"] @@ -71025,7 +68602,6 @@ pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[doc = "## Safety"] @@ -71050,7 +68626,6 @@ pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[doc = "## Safety"] @@ -71077,7 +68652,6 @@ pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[doc = "## Safety"] @@ -71102,7 +68676,6 @@ pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[doc = "## Safety"] @@ -71129,7 +68702,6 @@ pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[doc = "## Safety"] @@ -71154,7 +68726,6 @@ pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[doc = "## Safety"] @@ -71180,7 +68751,6 @@ pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] #[doc = "## Safety"] @@ -71205,7 +68775,6 @@ pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] #[doc = "## Safety"] @@ -71232,7 +68801,6 @@ pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[doc = "## Safety"] @@ -71257,7 +68825,6 @@ pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[doc = "## Safety"] @@ -71284,7 +68851,6 @@ pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[doc = "## Safety"] @@ -71309,7 +68875,6 @@ pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[doc = "## Safety"] @@ -71336,7 +68901,6 @@ pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[doc = "## Safety"] @@ -71361,7 +68925,6 @@ pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[doc = "## Safety"] @@ -71388,7 +68951,6 @@ pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] #[doc = "## Safety"] @@ -71413,7 +68975,6 @@ pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] #[doc = "## Safety"] @@ -71440,7 +69001,6 @@ pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[doc = "## Safety"] @@ -71465,7 +69025,6 @@ pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[doc = "## Safety"] @@ -71492,7 +69051,6 @@ pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] #[doc = "## Safety"] @@ -71517,7 +69075,6 @@ pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] #[doc = "## Safety"] @@ -71548,7 +69105,6 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[doc = "## Safety"] @@ -71573,7 +69129,6 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[doc = "## Safety"] @@ -71600,7 +69155,6 @@ pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[doc = "## Safety"] @@ -71625,7 +69179,6 @@ pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[doc = "## Safety"] @@ -71652,7 +69205,6 @@ pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[doc = "## Safety"] @@ -71677,7 +69229,6 @@ pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[doc = "## Safety"] @@ -71704,7 +69255,6 @@ pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] #[doc = "## Safety"] @@ -71729,7 +69279,6 @@ pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] #[doc = "## Safety"] @@ -71760,7 +69309,6 @@ pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[doc = "## Safety"] @@ -71785,7 +69333,6 @@ pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[doc = "## Safety"] @@ -71812,7 +69359,6 @@ pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[doc = "## Safety"] @@ -71837,7 +69383,6 @@ pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[doc = "## Safety"] @@ -71864,7 +69409,6 @@ pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[doc = "## Safety"] @@ -71889,7 +69433,6 @@ pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[doc = "## Safety"] @@ -71916,7 +69459,6 @@ pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[doc = "## Safety"] @@ -71941,7 +69483,6 @@ pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[doc = "## Safety"] @@ -71968,7 +69509,6 @@ pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] #[doc = "## Safety"] @@ -71993,7 +69533,6 @@ pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] #[doc = "## Safety"] @@ -72019,7 +69558,6 @@ pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[doc = "## Safety"] @@ -72044,7 +69582,6 @@ pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[doc = "## Safety"] @@ -72071,7 +69608,6 @@ pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] #[doc = "## Safety"] @@ -72096,7 +69632,6 @@ pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] #[doc = "## Safety"] @@ -72123,7 +69658,6 @@ pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[doc = "## Safety"] @@ -72148,7 +69682,6 @@ pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[doc = "## Safety"] @@ -72175,7 +69708,6 @@ pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[doc = "## Safety"] @@ -72200,7 +69732,6 @@ pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[doc = "## Safety"] @@ -72226,7 +69757,6 @@ pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[doc = "## Safety"] @@ -72251,7 +69781,6 @@ pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[doc = "## Safety"] @@ -72278,7 +69807,6 @@ pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] #[doc = "## Safety"] @@ -72303,7 +69831,6 @@ pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] #[doc = "## Safety"] @@ -72330,7 +69857,6 @@ pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[doc = "## Safety"] @@ -72355,7 +69881,6 @@ pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[doc = "## Safety"] @@ -72382,7 +69907,6 @@ pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[doc = "## Safety"] @@ -72407,7 +69931,6 @@ pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[doc = "## Safety"] @@ -72438,7 +69961,6 @@ pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[doc = "## Safety"] @@ -72463,7 +69985,6 @@ pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[doc = "## Safety"] @@ -72490,7 +70011,6 @@ pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] #[doc = "## Safety"] @@ -72515,7 +70035,6 @@ pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] #[doc = "## Safety"] @@ -72542,7 +70061,6 @@ pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[doc = "## Safety"] @@ -72567,7 +70085,6 @@ pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[doc = "## Safety"] @@ -72598,7 +70115,6 @@ pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] #[doc = "## Safety"] @@ -72623,7 +70139,6 @@ pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] #[doc = "## Safety"] @@ -72650,7 +70165,6 @@ pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[doc = "## Safety"] @@ -72675,7 +70189,6 @@ pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[doc = "## Safety"] @@ -72702,7 +70215,6 @@ pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[doc = "## Safety"] @@ -72727,7 +70239,6 @@ pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[doc = "## Safety"] @@ -72754,7 +70265,6 @@ pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[doc = "## Safety"] @@ -72779,7 +70289,6 @@ pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[doc = "## Safety"] @@ -72810,7 +70319,6 @@ pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] #[doc = "## Safety"] @@ -72835,7 +70343,6 @@ pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] #[doc = "## Safety"] @@ -72862,7 +70369,6 @@ pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] #[doc = "## Safety"] @@ -72887,7 +70393,6 @@ pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] #[doc = "## Safety"] @@ -72914,7 +70419,6 @@ pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[doc = "## Safety"] @@ -72939,7 +70443,6 @@ pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[doc = "## Safety"] @@ -72966,7 +70469,6 @@ pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[doc = "## Safety"] @@ -72991,7 +70493,6 @@ pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[doc = "## Safety"] @@ -73018,7 +70519,6 @@ pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[doc = "## Safety"] @@ -73043,7 +70543,6 @@ pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[doc = "## Safety"] @@ -73069,7 +70568,6 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[doc = "## Safety"] @@ -73094,7 +70592,6 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[doc = "## Safety"] @@ -73121,7 +70618,6 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[doc = "## Safety"] @@ -73146,7 +70642,6 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[doc = "## Safety"] @@ -73173,7 +70668,6 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] #[doc = "## Safety"] @@ -73198,7 +70692,6 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] #[doc = "## Safety"] @@ -73225,7 +70718,6 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[doc = "## Safety"] @@ -73250,7 +70742,6 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[doc = "## Safety"] @@ -73276,7 +70767,6 @@ pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[doc = "## Safety"] @@ -73301,7 +70791,6 @@ pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[doc = "## Safety"] @@ -73328,7 +70817,6 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[doc = "## Safety"] @@ -73353,7 +70841,6 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[doc = "## Safety"] @@ -73380,7 +70867,6 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] #[doc = "## Safety"] @@ -73405,7 +70891,6 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] #[doc = "## Safety"] @@ -73432,7 +70917,6 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[doc = "## Safety"] @@ -73457,7 +70941,6 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[doc = "## Safety"] @@ -73488,7 +70971,6 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[doc = "## Safety"] @@ -73513,7 +70995,6 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[doc = "## Safety"] @@ -73540,7 +71021,6 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[doc = "## Safety"] @@ -73565,7 +71045,6 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[doc = "## Safety"] @@ -73592,7 +71071,6 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[doc = "## Safety"] @@ -73617,7 +71095,6 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[doc = "## Safety"] @@ -73648,7 +71125,6 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[doc = "## Safety"] @@ -73673,7 +71149,6 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[doc = "## Safety"] @@ -73700,7 +71175,6 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] #[doc = "## Safety"] @@ -73725,7 +71199,6 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] #[doc = "## Safety"] @@ -73752,7 +71225,6 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[doc = "## Safety"] @@ -73777,7 +71249,6 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[doc = "## Safety"] @@ -73804,7 +71275,6 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[doc = "## Safety"] @@ -73829,7 +71299,6 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[doc = "## Safety"] @@ -73860,7 +71329,6 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[doc = "## Safety"] @@ -73885,7 +71353,6 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[doc = "## Safety"] @@ -73912,7 +71379,6 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[doc = "## Safety"] @@ -73937,7 +71403,6 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[doc = "## Safety"] @@ -73963,7 +71428,6 @@ pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[doc = "## Safety"] @@ -73988,7 +71452,6 @@ pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[doc = "## Safety"] @@ -74014,7 +71477,6 @@ pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[doc = "## Safety"] @@ -74039,7 +71501,6 @@ pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[doc = "## Safety"] @@ -74065,7 +71526,6 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[doc = "## Safety"] @@ -74090,7 +71550,6 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[doc = "## Safety"] @@ -74116,7 +71575,6 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[doc = "## Safety"] @@ -74141,7 +71599,6 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[doc = "## Safety"] @@ -74167,7 +71624,6 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[doc = "## Safety"] @@ -74192,7 +71648,6 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[doc = "## Safety"] @@ -74218,7 +71673,6 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[doc = "## Safety"] @@ -74243,7 +71697,6 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[doc = "## Safety"] @@ -74269,7 +71722,6 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] #[doc = "## Safety"] @@ -74293,7 +71745,6 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[doc = "## Safety"] @@ -74318,7 +71769,6 @@ pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[doc = "## Safety"] @@ -74344,7 +71794,6 @@ pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[doc = "## Safety"] @@ -74369,7 +71818,6 @@ pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[doc = "## Safety"] @@ -74395,7 +71843,6 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[doc = "## Safety"] @@ -74420,7 +71867,6 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[doc = "## Safety"] @@ -74447,7 +71893,6 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[doc = "## Safety"] @@ -74472,7 +71917,6 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[doc = "## Safety"] @@ -74503,7 +71947,6 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[doc = "## Safety"] @@ -74528,7 +71971,6 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[doc = "## Safety"] @@ -74555,7 +71997,6 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[doc = "## Safety"] @@ -74580,7 +72021,6 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[doc = "## Safety"] @@ -74607,7 +72047,6 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[doc = "## Safety"] @@ -74632,7 +72071,6 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[doc = "## Safety"] @@ -74663,7 +72101,6 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[doc = "## Safety"] @@ -74688,7 +72125,6 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[doc = "## Safety"] @@ -74715,7 +72151,6 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[doc = "## Safety"] @@ -74740,7 +72175,6 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[doc = "## Safety"] @@ -74767,7 +72201,6 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] #[doc = "## Safety"] @@ -74792,7 +72225,6 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] #[doc = "## Safety"] @@ -74819,7 +72251,6 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[doc = "## Safety"] @@ -74844,7 +72275,6 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[doc = "## Safety"] @@ -74875,7 +72305,6 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[doc = "## Safety"] @@ -74900,7 +72329,6 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[doc = "## Safety"] @@ -74927,7 +72355,6 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[doc = "## Safety"] @@ -74952,7 +72379,6 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[doc = "## Safety"] @@ -74979,7 +72405,6 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] #[doc = "## Safety"] @@ -75004,7 +72429,6 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] #[doc = "## Safety"] @@ -75031,7 +72455,6 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] #[doc = "## Safety"] @@ -75056,7 +72479,6 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] #[doc = "## Safety"] @@ -75083,7 +72505,6 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] #[doc = "## Safety"] @@ -75108,7 +72529,6 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] #[doc = "## Safety"] @@ -75135,7 +72555,6 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[doc = "## Safety"] @@ -75160,7 +72579,6 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[doc = "## Safety"] @@ -75186,7 +72604,6 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[doc = "## Safety"] @@ -75211,7 +72628,6 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[doc = "## Safety"] @@ -75238,7 +72654,6 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[doc = "## Safety"] @@ -75263,7 +72678,6 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[doc = "## Safety"] @@ -75290,7 +72704,6 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] #[doc = "## Safety"] @@ -75315,7 +72728,6 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] #[doc = "## Safety"] @@ -75341,7 +72753,6 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] #[doc = "## Safety"] @@ -75366,7 +72777,6 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] #[doc = "## Safety"] @@ -75393,7 +72803,6 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[doc = "## Safety"] @@ -75418,7 +72827,6 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[doc = "## Safety"] @@ -75445,7 +72853,6 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[doc = "## Safety"] @@ -75470,7 +72877,6 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[doc = "## Safety"] @@ -75497,7 +72903,6 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] #[doc = "## Safety"] @@ -75522,7 +72927,6 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] #[doc = "## Safety"] @@ -75553,7 +72957,6 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[doc = "## Safety"] @@ -75578,7 +72981,6 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[doc = "## Safety"] @@ -75605,7 +73007,6 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[doc = "## Safety"] @@ -75630,7 +73031,6 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[doc = "## Safety"] @@ -75657,7 +73057,6 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[doc = "## Safety"] @@ -75682,7 +73081,6 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[doc = "## Safety"] @@ -75709,7 +73107,6 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[doc = "## Safety"] @@ -75734,7 +73131,6 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[doc = "## Safety"] @@ -75761,7 +73157,6 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[doc = "## Safety"] @@ -75786,7 +73181,6 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[doc = "## Safety"] @@ -75813,7 +73207,6 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[doc = "## Safety"] @@ -75838,7 +73231,6 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[doc = "## Safety"] @@ -75865,7 +73257,6 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] #[doc = "## Safety"] @@ -75890,7 +73281,6 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] #[doc = "## Safety"] @@ -75921,7 +73311,6 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[doc = "## Safety"] @@ -75946,7 +73335,6 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[doc = "## Safety"] @@ -75973,7 +73361,6 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[doc = "## Safety"] @@ -75998,7 +73385,6 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[doc = "## Safety"] @@ -76025,7 +73411,6 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[doc = "## Safety"] @@ -76050,7 +73435,6 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[doc = "## Safety"] @@ -76077,7 +73461,6 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] #[doc = "## Safety"] @@ -76102,7 +73485,6 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] #[doc = "## Safety"] @@ -76129,7 +73511,6 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[doc = "## Safety"] @@ -76154,7 +73535,6 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[doc = "## Safety"] @@ -76181,7 +73561,6 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[doc = "## Safety"] @@ -76206,7 +73585,6 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[doc = "## Safety"] @@ -76232,7 +73610,6 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] #[doc = "## Safety"] @@ -76257,7 +73634,6 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] #[doc = "## Safety"] @@ -76284,7 +73660,6 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[doc = "## Safety"] @@ -76309,7 +73684,6 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[doc = "## Safety"] @@ -76336,7 +73710,6 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[doc = "## Safety"] @@ -76361,7 +73734,6 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[doc = "## Safety"] @@ -76387,7 +73759,6 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[doc = "## Safety"] @@ -76412,7 +73783,6 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[doc = "## Safety"] @@ -76439,7 +73809,6 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] #[doc = "## Safety"] @@ -76464,7 +73833,6 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] #[doc = "## Safety"] @@ -76491,7 +73859,6 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[doc = "## Safety"] @@ -76516,7 +73883,6 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[doc = "## Safety"] @@ -76543,7 +73909,6 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[doc = "## Safety"] @@ -76568,7 +73933,6 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[doc = "## Safety"] @@ -76599,7 +73963,6 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] #[doc = "## Safety"] @@ -76624,7 +73987,6 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] #[doc = "## Safety"] @@ -76651,7 +74013,6 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[doc = "## Safety"] @@ -76676,7 +74037,6 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[doc = "## Safety"] @@ -76703,7 +74063,6 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[doc = "## Safety"] @@ -76728,7 +74087,6 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[doc = "## Safety"] @@ -76755,7 +74113,6 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[doc = "## Safety"] @@ -76780,7 +74137,6 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[doc = "## Safety"] @@ -76811,7 +74167,6 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[doc = "## Safety"] @@ -76836,7 +74191,6 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[doc = "## Safety"] @@ -76863,7 +74217,6 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[doc = "## Safety"] @@ -76888,7 +74241,6 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[doc = "## Safety"] @@ -76915,7 +74267,6 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[doc = "## Safety"] @@ -76940,7 +74291,6 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[doc = "## Safety"] @@ -76971,7 +74321,6 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] #[doc = "## Safety"] @@ -76996,7 +74345,6 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] #[doc = "## Safety"] @@ -77023,7 +74371,6 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] #[doc = "## Safety"] @@ -77048,7 +74395,6 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] #[doc = "## Safety"] @@ -77075,7 +74421,6 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[doc = "## Safety"] @@ -77100,7 +74445,6 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[doc = "## Safety"] @@ -77127,7 +74471,6 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[doc = "## Safety"] @@ -77152,7 +74495,6 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[doc = "## Safety"] @@ -77179,7 +74521,6 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] #[doc = "## Safety"] @@ -77204,7 +74545,6 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] #[doc = "## Safety"] @@ -77231,7 +74571,6 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[doc = "## Safety"] @@ -77256,7 +74595,6 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[doc = "## Safety"] @@ -77282,7 +74620,6 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[doc = "## Safety"] @@ -77307,7 +74644,6 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[doc = "## Safety"] @@ -77334,7 +74670,6 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[doc = "## Safety"] @@ -77359,7 +74694,6 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[doc = "## Safety"] @@ -77386,7 +74720,6 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[doc = "## Safety"] @@ -77411,7 +74744,6 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[doc = "## Safety"] @@ -77437,7 +74769,6 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[doc = "## Safety"] @@ -77462,7 +74793,6 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[doc = "## Safety"] @@ -77489,7 +74819,6 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] #[doc = "## Safety"] @@ -77514,7 +74843,6 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] #[doc = "## Safety"] @@ -77541,7 +74869,6 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] #[doc = "## Safety"] @@ -77566,7 +74893,6 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] #[doc = "## Safety"] @@ -77593,7 +74919,6 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[doc = "## Safety"] @@ -77618,7 +74943,6 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[doc = "## Safety"] @@ -77649,7 +74973,6 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[doc = "## Safety"] @@ -77674,7 +74997,6 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[doc = "## Safety"] @@ -77701,7 +75023,6 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] #[doc = "## Safety"] @@ -77726,7 +75047,6 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] #[doc = "## Safety"] @@ -77753,7 +75073,6 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[doc = "## Safety"] @@ -77778,7 +75097,6 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[doc = "## Safety"] @@ -77805,7 +75123,6 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[doc = "## Safety"] @@ -77830,7 +75147,6 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[doc = "## Safety"] @@ -77861,7 +75177,6 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[doc = "## Safety"] @@ -77886,7 +75201,6 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[doc = "## Safety"] @@ -77913,7 +75227,6 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[doc = "## Safety"] @@ -77938,7 +75251,6 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[doc = "## Safety"] @@ -77965,7 +75277,6 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[doc = "## Safety"] @@ -77990,7 +75301,6 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[doc = "## Safety"] @@ -78021,7 +75331,6 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[doc = "## Safety"] @@ -78046,7 +75355,6 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[doc = "## Safety"] @@ -78073,7 +75381,6 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] #[doc = "## Safety"] @@ -78098,7 +75405,6 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] #[doc = "## Safety"] @@ -78124,7 +75430,6 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[doc = "## Safety"] @@ -78149,7 +75454,6 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[doc = "## Safety"] @@ -78175,7 +75479,6 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[doc = "## Safety"] @@ -78200,7 +75503,6 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[doc = "## Safety"] @@ -78226,7 +75528,6 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[doc = "## Safety"] @@ -78251,7 +75552,6 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[doc = "## Safety"] @@ -78277,7 +75577,6 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] #[doc = "## Safety"] @@ -78301,7 +75600,6 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[doc = "## Safety"] @@ -78326,7 +75624,6 @@ pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[doc = "## Safety"] @@ -78352,7 +75649,6 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[doc = "## Safety"] @@ -78377,7 +75673,6 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[doc = "## Safety"] @@ -78403,7 +75698,6 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[doc = "## Safety"] @@ -78428,7 +75722,6 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[doc = "## Safety"] @@ -78454,7 +75747,6 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[doc = "## Safety"] @@ -78479,7 +75771,6 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[doc = "## Safety"] @@ -78505,7 +75796,6 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[doc = "## Safety"] @@ -78530,7 +75820,6 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[doc = "## Safety"] @@ -78556,7 +75845,6 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[doc = "## Safety"] @@ -78581,7 +75869,6 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[doc = "## Safety"] @@ -78608,7 +75895,6 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[doc = "## Safety"] @@ -78633,7 +75919,6 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[doc = "## Safety"] @@ -78664,7 +75949,6 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[doc = "## Safety"] @@ -78689,7 +75973,6 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[doc = "## Safety"] @@ -78716,7 +75999,6 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] #[doc = "## Safety"] @@ -78741,7 +76023,6 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] #[doc = "## Safety"] @@ -78768,7 +76049,6 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] #[doc = "## Safety"] @@ -78793,7 +76073,6 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] #[doc = "## Safety"] @@ -78820,7 +76099,6 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[doc = "## Safety"] @@ -78845,7 +76123,6 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[doc = "## Safety"] @@ -78876,7 +76153,6 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[doc = "## Safety"] @@ -78901,7 +76177,6 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[doc = "## Safety"] @@ -78928,7 +76203,6 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] #[doc = "## Safety"] @@ -78953,7 +76227,6 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] #[doc = "## Safety"] @@ -78980,7 +76253,6 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[doc = "## Safety"] @@ -79005,7 +76277,6 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[doc = "## Safety"] @@ -79036,7 +76307,6 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[doc = "## Safety"] @@ -79061,7 +76331,6 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[doc = "## Safety"] @@ -79088,7 +76357,6 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[doc = "## Safety"] @@ -79113,7 +76381,6 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[doc = "## Safety"] @@ -79140,7 +76407,6 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] #[doc = "## Safety"] @@ -79165,7 +76431,6 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] #[doc = "## Safety"] @@ -79192,7 +76457,6 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[doc = "## Safety"] @@ -79217,7 +76481,6 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[doc = "## Safety"] @@ -79244,7 +76507,6 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[doc = "## Safety"] @@ -79269,7 +76531,6 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[doc = "## Safety"] @@ -79296,7 +76557,6 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] #[doc = "## Safety"] @@ -79321,7 +76581,6 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] #[doc = "## Safety"] @@ -79347,7 +76606,6 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[doc = "## Safety"] @@ -79372,7 +76630,6 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[doc = "## Safety"] @@ -79399,7 +76656,6 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[doc = "## Safety"] @@ -79424,7 +76680,6 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[doc = "## Safety"] @@ -79451,7 +76706,6 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[doc = "## Safety"] @@ -79476,7 +76730,6 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[doc = "## Safety"] @@ -79503,7 +76756,6 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[doc = "## Safety"] @@ -79528,7 +76780,6 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[doc = "## Safety"] @@ -79554,7 +76805,6 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[doc = "## Safety"] @@ -79579,7 +76829,6 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[doc = "## Safety"] @@ -79606,7 +76855,6 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[doc = "## Safety"] @@ -79631,7 +76879,6 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[doc = "## Safety"] @@ -79658,7 +76905,6 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] #[doc = "## Safety"] @@ -79683,7 +76929,6 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] #[doc = "## Safety"] @@ -79714,7 +76959,6 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[doc = "## Safety"] @@ -79739,7 +76983,6 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[doc = "## Safety"] @@ -79766,7 +77009,6 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[doc = "## Safety"] @@ -79791,7 +77033,6 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[doc = "## Safety"] @@ -79818,7 +77059,6 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[doc = "## Safety"] @@ -79843,7 +77083,6 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[doc = "## Safety"] @@ -79870,7 +77109,6 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] #[doc = "## Safety"] @@ -79895,7 +77133,6 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] #[doc = "## Safety"] @@ -79926,7 +77163,6 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[doc = "## Safety"] @@ -79951,7 +77187,6 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[doc = "## Safety"] @@ -79978,7 +77213,6 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[doc = "## Safety"] @@ -80003,7 +77237,6 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[doc = "## Safety"] @@ -80030,7 +77263,6 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[doc = "## Safety"] @@ -80055,7 +77287,6 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[doc = "## Safety"] @@ -80082,7 +77313,6 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[doc = "## Safety"] @@ -80107,7 +77337,6 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[doc = "## Safety"] @@ -80134,7 +77363,6 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] #[doc = "## Safety"] @@ -80159,7 +77387,6 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] #[doc = "## Safety"] @@ -80186,7 +77413,6 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[doc = "## Safety"] @@ -80211,7 +77437,6 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[doc = "## Safety"] @@ -80238,7 +77463,6 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] #[doc = "## Safety"] @@ -80263,7 +77487,6 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] #[doc = "## Safety"] @@ -80290,7 +77513,6 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[doc = "## Safety"] @@ -80315,7 +77537,6 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[doc = "## Safety"] @@ -80342,7 +77563,6 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[doc = "## Safety"] @@ -80367,7 +77587,6 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[doc = "## Safety"] @@ -80393,7 +77612,6 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[doc = "## Safety"] @@ -80418,7 +77636,6 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[doc = "## Safety"] @@ -80445,7 +77662,6 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] #[doc = "## Safety"] @@ -80470,7 +77686,6 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] #[doc = "## Safety"] @@ -80497,7 +77712,6 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[doc = "## Safety"] @@ -80522,7 +77736,6 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[doc = "## Safety"] @@ -80549,7 +77762,6 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[doc = "## Safety"] @@ -80574,7 +77786,6 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[doc = "## Safety"] @@ -80600,7 +77811,6 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[doc = "## Safety"] @@ -80625,7 +77835,6 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[doc = "## Safety"] @@ -80652,7 +77861,6 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[doc = "## Safety"] @@ -80677,7 +77885,6 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[doc = "## Safety"] @@ -80704,7 +77911,6 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[doc = "## Safety"] @@ -80729,7 +77935,6 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[doc = "## Safety"] @@ -80760,7 +77965,6 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] #[doc = "## Safety"] @@ -80785,7 +77989,6 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] #[doc = "## Safety"] @@ -80812,7 +78015,6 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[doc = "## Safety"] @@ -80837,7 +78039,6 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[doc = "## Safety"] @@ -80864,7 +78065,6 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[doc = "## Safety"] @@ -80889,7 +78089,6 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[doc = "## Safety"] @@ -80916,7 +78115,6 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[doc = "## Safety"] @@ -80941,7 +78139,6 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[doc = "## Safety"] @@ -80972,7 +78169,6 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] #[doc = "## Safety"] @@ -80997,7 +78193,6 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] #[doc = "## Safety"] @@ -81024,7 +78219,6 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] #[doc = "## Safety"] @@ -81049,7 +78243,6 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] #[doc = "## Safety"] @@ -81076,7 +78269,6 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[doc = "## Safety"] @@ -81101,7 +78293,6 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[doc = "## Safety"] @@ -81128,7 +78319,6 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[doc = "## Safety"] @@ -81153,7 +78343,6 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[doc = "## Safety"] @@ -81184,7 +78373,6 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[doc = "## Safety"] @@ -81209,7 +78397,6 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[doc = "## Safety"] @@ -81239,7 +78426,6 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[doc = "## Safety"] @@ -81264,7 +78450,6 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[doc = "## Safety"] @@ -81290,7 +78475,6 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[doc = "## Safety"] @@ -81315,7 +78499,6 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[doc = "## Safety"] @@ -81341,7 +78524,6 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[doc = "## Safety"] @@ -81366,7 +78548,6 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[doc = "## Safety"] @@ -81392,7 +78573,6 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[doc = "## Safety"] @@ -81417,7 +78597,6 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[doc = "## Safety"] @@ -81447,7 +78626,6 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[doc = "## Safety"] @@ -81472,7 +78650,6 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[doc = "## Safety"] @@ -81498,7 +78675,6 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[doc = "## Safety"] @@ -81523,7 +78699,6 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[doc = "## Safety"] @@ -81549,7 +78724,6 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[doc = "## Safety"] @@ -81574,7 +78748,6 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[doc = "## Safety"] @@ -81600,7 +78773,6 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[doc = "## Safety"] @@ -81625,7 +78797,6 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[doc = "## Safety"] @@ -81655,7 +78826,6 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[doc = "## Safety"] @@ -81680,7 +78850,6 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[doc = "## Safety"] @@ -81706,7 +78875,6 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[doc = "## Safety"] @@ -81731,7 +78899,6 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[doc = "## Safety"] @@ -81757,7 +78924,6 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[doc = "## Safety"] @@ -81782,7 +78948,6 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[doc = "## Safety"] @@ -81808,7 +78973,6 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[doc = "## Safety"] @@ -81833,7 +78997,6 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[doc = "## Safety"] @@ -81859,7 +79022,6 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[doc = "## Safety"] @@ -81884,7 +79046,6 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[doc = "## Safety"] @@ -81911,7 +79072,6 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[doc = "## Safety"] @@ -81936,7 +79096,6 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[doc = "## Safety"] @@ -81962,7 +79121,6 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[doc = "## Safety"] @@ -81987,7 +79145,6 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[doc = "## Safety"] @@ -82013,7 +79170,6 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[doc = "## Safety"] @@ -82038,7 +79194,6 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[doc = "## Safety"] @@ -82065,7 +79220,6 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[doc = "## Safety"] @@ -82090,7 +79244,6 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[doc = "## Safety"] @@ -82116,7 +79269,6 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[doc = "## Safety"] @@ -82141,7 +79293,6 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[doc = "## Safety"] @@ -82167,7 +79318,6 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[doc = "## Safety"] @@ -82192,7 +79342,6 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[doc = "## Safety"] @@ -82219,7 +79368,6 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[doc = "## Safety"] @@ -82244,7 +79392,6 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[doc = "## Safety"] @@ -82270,7 +79417,6 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[doc = "## Safety"] @@ -82295,7 +79441,6 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[doc = "## Safety"] @@ -82321,7 +79466,6 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[doc = "## Safety"] @@ -82346,7 +79490,6 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[doc = "## Safety"] @@ -82372,7 +79515,6 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[doc = "## Safety"] @@ -82397,7 +79539,6 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[doc = "## Safety"] @@ -82424,7 +79565,6 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[doc = "## Safety"] @@ -82449,7 +79589,6 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[doc = "## Safety"] @@ -82475,7 +79614,6 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[doc = "## Safety"] @@ -82500,7 +79638,6 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[doc = "## Safety"] @@ -82526,7 +79663,6 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[doc = "## Safety"] @@ -82551,7 +79687,6 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[doc = "## Safety"] @@ -82578,7 +79713,6 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[doc = "## Safety"] @@ -82603,7 +79737,6 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[doc = "## Safety"] @@ -82629,7 +79762,6 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[doc = "## Safety"] @@ -82654,7 +79786,6 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[doc = "## Safety"] @@ -82680,7 +79811,6 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] #[doc = "## Safety"] @@ -82705,7 +79835,6 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] #[doc = "## Safety"] @@ -82732,7 +79861,6 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[doc = "## Safety"] @@ -82757,7 +79885,6 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[doc = "## Safety"] @@ -82783,7 +79910,6 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[doc = "## Safety"] @@ -82808,7 +79934,6 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[doc = "## Safety"] @@ -82834,7 +79959,6 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[doc = "## Safety"] @@ -82859,7 +79983,6 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[doc = "## Safety"] @@ -82885,7 +80008,6 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[doc = "## Safety"] @@ -82910,7 +80032,6 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[doc = "## Safety"] @@ -82937,7 +80058,6 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[doc = "## Safety"] @@ -82962,7 +80082,6 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[doc = "## Safety"] @@ -82988,7 +80107,6 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[doc = "## Safety"] @@ -83013,7 +80131,6 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[doc = "## Safety"] @@ -83039,7 +80156,6 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[doc = "## Safety"] @@ -83064,7 +80180,6 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[doc = "## Safety"] @@ -83091,7 +80206,6 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[doc = "## Safety"] @@ -83116,7 +80230,6 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[doc = "## Safety"] @@ -83142,7 +80255,6 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[doc = "## Safety"] @@ -83167,7 +80279,6 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[doc = "## Safety"] @@ -83193,7 +80304,6 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] #[doc = "## Safety"] @@ -83218,7 +80328,6 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] #[doc = "## Safety"] @@ -83244,7 +80353,6 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[doc = "## Safety"] @@ -83269,7 +80377,6 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[doc = "## Safety"] @@ -83295,7 +80402,6 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[doc = "## Safety"] @@ -83320,7 +80426,6 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[doc = "## Safety"] @@ -83346,7 +80451,6 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[doc = "## Safety"] @@ -83371,7 +80475,6 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[doc = "## Safety"] @@ -83397,7 +80500,6 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[doc = "## Safety"] @@ -83422,7 +80524,6 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[doc = "## Safety"] @@ -83448,7 +80549,6 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[doc = "## Safety"] @@ -83473,7 +80573,6 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[doc = "## Safety"] @@ -83499,7 +80598,6 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[doc = "## Safety"] @@ -83524,7 +80622,6 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[doc = "## Safety"] @@ -83550,7 +80647,6 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[doc = "## Safety"] @@ -83575,7 +80671,6 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[doc = "## Safety"] @@ -83606,7 +80701,6 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[doc = "## Safety"] @@ -83631,7 +80725,6 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[doc = "## Safety"] @@ -83658,7 +80751,6 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[doc = "## Safety"] @@ -83683,7 +80775,6 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[doc = "## Safety"] @@ -83710,7 +80801,6 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[doc = "## Safety"] @@ -83735,7 +80825,6 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[doc = "## Safety"] @@ -83766,7 +80855,6 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[doc = "## Safety"] @@ -83791,7 +80879,6 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[doc = "## Safety"] @@ -83818,7 +80905,6 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[doc = "## Safety"] @@ -83843,7 +80929,6 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[doc = "## Safety"] @@ -83870,7 +80955,6 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[doc = "## Safety"] @@ -83895,7 +80979,6 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[doc = "## Safety"] @@ -83926,7 +81009,6 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[doc = "## Safety"] @@ -83951,7 +81033,6 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { transmute(a) } - #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[doc = "## Safety"] @@ -83978,7 +81059,6 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] #[doc = "## Safety"] @@ -84001,7 +81081,7 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v8i8" @@ -84011,7 +81091,6 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vrhadd_s8(a, b) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] #[doc = "## Safety"] @@ -84034,7 +81113,7 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v8i8" @@ -84047,7 +81126,6 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vrhadd_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] #[doc = "## Safety"] @@ -84070,7 +81148,7 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v16i8" @@ -84080,7 +81158,6 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vrhaddq_s8(a, b) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] #[doc = "## Safety"] @@ -84103,7 +81180,7 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v16i8" @@ -84120,7 +81197,6 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] #[doc = "## Safety"] @@ -84143,7 +81219,7 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v4i16" @@ -84153,7 +81229,6 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vrhadd_s16(a, b) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] #[doc = "## Safety"] @@ -84176,7 +81251,7 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v4i16" @@ -84189,7 +81264,6 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vrhadd_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] #[doc = "## Safety"] @@ -84212,7 +81286,7 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v8i16" @@ -84222,7 +81296,6 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vrhaddq_s16(a, b) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] #[doc = "## Safety"] @@ -84245,7 +81318,7 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v8i16" @@ -84258,7 +81331,6 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vrhaddq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] #[doc = "## Safety"] @@ -84281,7 +81353,7 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v2i32" @@ -84291,7 +81363,6 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vrhadd_s32(a, b) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] #[doc = "## Safety"] @@ -84314,7 +81385,7 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v2i32" @@ -84327,7 +81398,6 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vrhadd_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] #[doc = "## Safety"] @@ -84350,7 +81420,7 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v4i32" @@ -84360,7 +81430,6 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vrhaddq_s32(a, b) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] #[doc = "## Safety"] @@ -84383,7 +81452,7 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.srhadd.v4i32" @@ -84396,7 +81465,6 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vrhaddq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] #[doc = "## Safety"] @@ -84419,7 +81487,7 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v8i8" @@ -84429,7 +81497,6 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] #[doc = "## Safety"] @@ -84452,7 +81519,7 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v8i8" @@ -84465,7 +81532,6 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] #[doc = "## Safety"] @@ -84488,7 +81554,7 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v16i8" @@ -84498,7 +81564,6 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] #[doc = "## Safety"] @@ -84521,7 +81586,7 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v16i8" @@ -84538,7 +81603,6 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] #[doc = "## Safety"] @@ -84561,7 +81625,7 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v4i16" @@ -84571,7 +81635,6 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] #[doc = "## Safety"] @@ -84594,7 +81657,7 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v4i16" @@ -84607,7 +81670,6 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] #[doc = "## Safety"] @@ -84630,7 +81692,7 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v8i16" @@ -84640,7 +81702,6 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] #[doc = "## Safety"] @@ -84663,7 +81724,7 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v8i16" @@ -84676,7 +81737,6 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] #[doc = "## Safety"] @@ -84699,7 +81759,7 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v2i32" @@ -84709,7 +81769,6 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] #[doc = "## Safety"] @@ -84732,7 +81791,7 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v2i32" @@ -84745,7 +81804,6 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] #[doc = "## Safety"] @@ -84768,7 +81826,7 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v4i32" @@ -84778,7 +81836,6 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() } - #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] #[doc = "## Safety"] @@ -84801,7 +81858,7 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urhadd.v4i32" @@ -84814,7 +81871,6 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] #[doc = "## Safety"] @@ -84837,7 +81893,7 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v2f32" @@ -84847,7 +81903,6 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { } _vrndn_f32(a) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] #[doc = "## Safety"] @@ -84870,7 +81925,7 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v2f32" @@ -84882,7 +81937,6 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrndn_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] #[doc = "## Safety"] @@ -84905,7 +81959,7 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v4f32" @@ -84915,7 +81969,6 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { } _vrndnq_f32(a) } - #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] #[doc = "## Safety"] @@ -84938,7 +81991,7 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v4f32" @@ -84950,7 +82003,6 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrndnq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] #[doc = "## Safety"] @@ -84973,7 +82025,7 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -84983,7 +82035,6 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vrshl_s8(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] #[doc = "## Safety"] @@ -85006,7 +82057,7 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85019,7 +82070,6 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vrshl_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] #[doc = "## Safety"] @@ -85042,7 +82092,7 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85052,7 +82102,6 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vrshlq_s8(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] #[doc = "## Safety"] @@ -85075,7 +82124,7 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85092,7 +82141,6 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] #[doc = "## Safety"] @@ -85115,7 +82163,7 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85125,7 +82173,6 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vrshl_s16(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] #[doc = "## Safety"] @@ -85148,7 +82195,7 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85161,7 +82208,6 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vrshl_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] #[doc = "## Safety"] @@ -85184,7 +82230,7 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85194,7 +82240,6 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vrshlq_s16(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] #[doc = "## Safety"] @@ -85217,7 +82262,7 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85230,7 +82275,6 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vrshlq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] #[doc = "## Safety"] @@ -85253,7 +82297,7 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85263,7 +82307,6 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vrshl_s32(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] #[doc = "## Safety"] @@ -85286,7 +82329,7 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85299,7 +82342,6 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vrshl_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] #[doc = "## Safety"] @@ -85322,7 +82364,7 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85332,7 +82374,6 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vrshlq_s32(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] #[doc = "## Safety"] @@ -85355,7 +82396,7 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85368,7 +82409,6 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vrshlq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] #[doc = "## Safety"] @@ -85390,7 +82430,7 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85400,7 +82440,6 @@ pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } _vrshl_s64(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] #[doc = "## Safety"] @@ -85423,7 +82462,7 @@ pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85433,7 +82472,6 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } _vrshlq_s64(a, b) } - #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] #[doc = "## Safety"] @@ -85456,7 +82494,7 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85469,7 +82507,6 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vrshlq_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] #[doc = "## Safety"] @@ -85492,7 +82529,7 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85502,7 +82539,6 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } _vrshl_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] #[doc = "## Safety"] @@ -85525,7 +82561,7 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85538,7 +82574,6 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vrshl_u8(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] #[doc = "## Safety"] @@ -85561,7 +82596,7 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85571,7 +82606,6 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } _vrshlq_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] #[doc = "## Safety"] @@ -85594,7 +82628,7 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85611,7 +82645,6 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] #[doc = "## Safety"] @@ -85634,7 +82667,7 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85644,7 +82677,6 @@ pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } _vrshl_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] #[doc = "## Safety"] @@ -85667,7 +82699,7 @@ pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85680,7 +82712,6 @@ pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vrshl_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] #[doc = "## Safety"] @@ -85703,7 +82734,7 @@ pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85713,7 +82744,6 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } _vrshlq_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] #[doc = "## Safety"] @@ -85736,7 +82766,7 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85749,7 +82779,6 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vrshlq_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] #[doc = "## Safety"] @@ -85772,7 +82801,7 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85782,7 +82811,6 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } _vrshl_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] #[doc = "## Safety"] @@ -85805,7 +82833,7 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85818,7 +82846,6 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vrshl_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] #[doc = "## Safety"] @@ -85841,7 +82868,7 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85851,7 +82878,6 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } _vrshlq_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] #[doc = "## Safety"] @@ -85874,7 +82900,7 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85887,7 +82913,6 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vrshlq_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] #[doc = "## Safety"] @@ -85909,7 +82934,7 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85919,7 +82944,6 @@ pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } _vrshl_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] #[doc = "## Safety"] @@ -85942,7 +82966,7 @@ pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85952,7 +82976,6 @@ pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } _vrshlq_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] #[doc = "## Safety"] @@ -85975,7 +82998,7 @@ pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -85988,7 +83011,6 @@ pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vrshlq_u64(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] #[doc = "## Safety"] @@ -86015,7 +83037,6 @@ pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); vrshl_s8(a, vdup_n_s8(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] #[doc = "## Safety"] @@ -86044,7 +83065,6 @@ pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vrshl_s8(a, vdup_n_s8(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] #[doc = "## Safety"] @@ -86071,7 +83091,6 @@ pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); vrshlq_s8(a, vdupq_n_s8(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] #[doc = "## Safety"] @@ -86104,7 +83123,6 @@ pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] #[doc = "## Safety"] @@ -86131,7 +83149,6 @@ pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); vrshl_s16(a, vdup_n_s16(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] #[doc = "## Safety"] @@ -86160,7 +83177,6 @@ pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = vrshl_s16(a, vdup_n_s16(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] #[doc = "## Safety"] @@ -86187,7 +83203,6 @@ pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); vrshlq_s16(a, vdupq_n_s16(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] #[doc = "## Safety"] @@ -86216,7 +83231,6 @@ pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = vrshlq_s16(a, vdupq_n_s16(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] #[doc = "## Safety"] @@ -86243,7 +83257,6 @@ pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); vrshl_s32(a, vdup_n_s32(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] #[doc = "## Safety"] @@ -86272,7 +83285,6 @@ pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = vrshl_s32(a, vdup_n_s32(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] #[doc = "## Safety"] @@ -86299,7 +83311,6 @@ pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); vrshlq_s32(a, vdupq_n_s32(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] #[doc = "## Safety"] @@ -86328,7 +83339,6 @@ pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = vrshlq_s32(a, vdupq_n_s32(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] #[doc = "## Safety"] @@ -86354,7 +83364,6 @@ pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); vrshl_s64(a, vdup_n_s64(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] #[doc = "## Safety"] @@ -86381,7 +83390,6 @@ pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); vrshlq_s64(a, vdupq_n_s64(-N as _)) } - #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] #[doc = "## Safety"] @@ -86410,7 +83418,6 @@ pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = vrshlq_s64(a, vdupq_n_s64(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] #[doc = "## Safety"] @@ -86437,7 +83444,6 @@ pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); vrshl_u8(a, vdup_n_s8(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] #[doc = "## Safety"] @@ -86466,7 +83472,6 @@ pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = vrshl_u8(a, vdup_n_s8(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] #[doc = "## Safety"] @@ -86493,7 +83498,6 @@ pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert!(N >= 1 && N <= 8); vrshlq_u8(a, vdupq_n_s8(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] #[doc = "## Safety"] @@ -86526,7 +83530,6 @@ pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] #[doc = "## Safety"] @@ -86553,7 +83556,6 @@ pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); vrshl_u16(a, vdup_n_s16(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] #[doc = "## Safety"] @@ -86582,7 +83584,6 @@ pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = vrshl_u16(a, vdup_n_s16(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] #[doc = "## Safety"] @@ -86609,7 +83610,6 @@ pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); vrshlq_u16(a, vdupq_n_s16(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] #[doc = "## Safety"] @@ -86638,7 +83638,6 @@ pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = vrshlq_u16(a, vdupq_n_s16(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] #[doc = "## Safety"] @@ -86665,7 +83664,6 @@ pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); vrshl_u32(a, vdup_n_s32(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] #[doc = "## Safety"] @@ -86694,7 +83692,6 @@ pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = vrshl_u32(a, vdup_n_s32(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] #[doc = "## Safety"] @@ -86721,7 +83718,6 @@ pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); vrshlq_u32(a, vdupq_n_s32(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] #[doc = "## Safety"] @@ -86750,7 +83746,6 @@ pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = vrshlq_u32(a, vdupq_n_s32(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] #[doc = "## Safety"] @@ -86776,7 +83771,6 @@ pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N >= 1 && N <= 64); vrshl_u64(a, vdup_n_s64(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] #[doc = "## Safety"] @@ -86803,7 +83797,6 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); vrshlq_u64(a, vdupq_n_s64(-N as _)) } - #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] #[doc = "## Safety"] @@ -86832,7 +83825,6 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = vrshlq_u64(a, vdupq_n_s64(-N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] #[doc = "## Safety"] @@ -86846,7 +83838,7 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -86860,7 +83852,6 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { }, ) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] #[doc = "## Safety"] @@ -86874,7 +83865,7 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } @@ -86890,7 +83881,6 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] #[doc = "## Safety"] @@ -86904,7 +83894,7 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -86913,7 +83903,6 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, ) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] #[doc = "## Safety"] @@ -86927,7 +83916,7 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } @@ -86938,7 +83927,6 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { ); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] #[doc = "## Safety"] @@ -86952,13 +83940,12 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] #[doc = "## Safety"] @@ -86972,7 +83959,7 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } @@ -86980,7 +83967,6 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] #[doc = "## Safety"] @@ -86994,7 +83980,7 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v8i8" @@ -87003,7 +83989,6 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { } _vrshrn_n_s16(a, N) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] #[doc = "## Safety"] @@ -87017,7 +84002,7 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v8i8" @@ -87028,7 +84013,6 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = _vrshrn_n_s16(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] #[doc = "## Safety"] @@ -87042,7 +84026,7 @@ pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v4i16" @@ -87051,7 +84035,6 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { } _vrshrn_n_s32(a, N) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] #[doc = "## Safety"] @@ -87065,7 +84048,7 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v4i16" @@ -87076,7 +84059,6 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = _vrshrn_n_s32(a, N); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] #[doc = "## Safety"] @@ -87090,7 +84072,7 @@ pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v2i32" @@ -87099,7 +84081,6 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { } _vrshrn_n_s64(a, N) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] #[doc = "## Safety"] @@ -87113,7 +84094,7 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rshrn.v2i32" @@ -87124,7 +84105,6 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vrshrn_n_s64(a, N); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] #[doc = "## Safety"] @@ -87151,7 +84131,6 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); transmute(vrshrn_n_s16::(transmute(a))) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] #[doc = "## Safety"] @@ -87180,7 +84159,6 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vrshrn_n_s16::(transmute(a))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] #[doc = "## Safety"] @@ -87207,7 +84185,6 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); transmute(vrshrn_n_s32::(transmute(a))) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] #[doc = "## Safety"] @@ -87236,7 +84213,6 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(vrshrn_n_s32::(transmute(a))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] #[doc = "## Safety"] @@ -87263,7 +84239,6 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); transmute(vrshrn_n_s64::(transmute(a))) } - #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] #[doc = "## Safety"] @@ -87292,7 +84267,6 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(vrshrn_n_s64::(transmute(a))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] #[doc = "## Safety"] @@ -87315,7 +84289,7 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87325,7 +84299,6 @@ pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { } _vrsqrte_f32(a) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] #[doc = "## Safety"] @@ -87348,7 +84321,7 @@ pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87360,7 +84333,6 @@ pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrsqrte_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] #[doc = "## Safety"] @@ -87383,7 +84355,7 @@ pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87393,7 +84365,6 @@ pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { } _vrsqrteq_f32(a) } - #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] #[doc = "## Safety"] @@ -87416,7 +84387,7 @@ pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87428,7 +84399,6 @@ pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrsqrteq_f32(a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] #[doc = "## Safety"] @@ -87451,7 +84421,7 @@ pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87461,7 +84431,6 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { } _vrsqrte_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] #[doc = "## Safety"] @@ -87484,7 +84453,7 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87496,7 +84465,6 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vrsqrte_u32(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] #[doc = "## Safety"] @@ -87519,7 +84487,7 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87529,7 +84497,6 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { } _vrsqrteq_u32(a.as_signed()).as_unsigned() } - #[doc = "Unsigned reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] #[doc = "## Safety"] @@ -87552,7 +84519,7 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87564,7 +84531,6 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vrsqrteq_u32(a.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] #[doc = "## Safety"] @@ -87587,7 +84553,7 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87597,7 +84563,6 @@ pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } _vrsqrts_f32(a, b) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] #[doc = "## Safety"] @@ -87620,7 +84585,7 @@ pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87633,7 +84598,6 @@ pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = _vrsqrts_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] #[doc = "## Safety"] @@ -87656,7 +84620,7 @@ pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87666,7 +84630,6 @@ pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } _vrsqrtsq_f32(a, b) } - #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] #[doc = "## Safety"] @@ -87689,7 +84652,7 @@ pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -87702,7 +84665,6 @@ pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = _vrsqrtsq_f32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] #[doc = "## Safety"] @@ -87729,7 +84691,6 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vrshr_n_s8::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] #[doc = "## Safety"] @@ -87759,7 +84720,6 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_add(a, vrshr_n_s8::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] #[doc = "## Safety"] @@ -87786,7 +84746,6 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t static_assert!(N >= 1 && N <= 8); simd_add(a, vrshrq_n_s8::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] #[doc = "## Safety"] @@ -87820,7 +84779,6 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] #[doc = "## Safety"] @@ -87847,7 +84805,6 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t static_assert!(N >= 1 && N <= 16); simd_add(a, vrshr_n_s16::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] #[doc = "## Safety"] @@ -87877,7 +84834,6 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t let ret_val: int16x4_t = simd_add(a, vrshr_n_s16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] #[doc = "## Safety"] @@ -87904,7 +84860,6 @@ pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ static_assert!(N >= 1 && N <= 16); simd_add(a, vrshrq_n_s16::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] #[doc = "## Safety"] @@ -87934,7 +84889,6 @@ pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ let ret_val: int16x8_t = simd_add(a, vrshrq_n_s16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] #[doc = "## Safety"] @@ -87961,7 +84915,6 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t static_assert!(N >= 1 && N <= 32); simd_add(a, vrshr_n_s32::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] #[doc = "## Safety"] @@ -87991,7 +84944,6 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t let ret_val: int32x2_t = simd_add(a, vrshr_n_s32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] #[doc = "## Safety"] @@ -88018,7 +84970,6 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ static_assert!(N >= 1 && N <= 32); simd_add(a, vrshrq_n_s32::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] #[doc = "## Safety"] @@ -88048,7 +84999,6 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ let ret_val: int32x4_t = simd_add(a, vrshrq_n_s32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] #[doc = "## Safety"] @@ -88074,7 +85024,6 @@ pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t static_assert!(N >= 1 && N <= 64); simd_add(a, vrshr_n_s64::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] #[doc = "## Safety"] @@ -88101,7 +85050,6 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ static_assert!(N >= 1 && N <= 64); simd_add(a, vrshrq_n_s64::(b)) } - #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] #[doc = "## Safety"] @@ -88131,7 +85079,6 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ let ret_val: int64x2_t = simd_add(a, vrshrq_n_s64::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] #[doc = "## Safety"] @@ -88158,7 +85105,6 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t static_assert!(N >= 1 && N <= 8); simd_add(a, vrshr_n_u8::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] #[doc = "## Safety"] @@ -88188,7 +85134,6 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t let ret_val: uint8x8_t = simd_add(a, vrshr_n_u8::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] #[doc = "## Safety"] @@ -88215,7 +85160,6 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 static_assert!(N >= 1 && N <= 8); simd_add(a, vrshrq_n_u8::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] #[doc = "## Safety"] @@ -88249,7 +85193,6 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] #[doc = "## Safety"] @@ -88276,7 +85219,6 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x static_assert!(N >= 1 && N <= 16); simd_add(a, vrshr_n_u16::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] #[doc = "## Safety"] @@ -88306,7 +85248,6 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x let ret_val: uint16x4_t = simd_add(a, vrshr_n_u16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] #[doc = "## Safety"] @@ -88333,7 +85274,6 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 static_assert!(N >= 1 && N <= 16); simd_add(a, vrshrq_n_u16::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] #[doc = "## Safety"] @@ -88363,7 +85303,6 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 let ret_val: uint16x8_t = simd_add(a, vrshrq_n_u16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] #[doc = "## Safety"] @@ -88390,7 +85329,6 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x static_assert!(N >= 1 && N <= 32); simd_add(a, vrshr_n_u32::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] #[doc = "## Safety"] @@ -88420,7 +85358,6 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x let ret_val: uint32x2_t = simd_add(a, vrshr_n_u32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] #[doc = "## Safety"] @@ -88447,7 +85384,6 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 static_assert!(N >= 1 && N <= 32); simd_add(a, vrshrq_n_u32::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] #[doc = "## Safety"] @@ -88477,7 +85413,6 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 let ret_val: uint32x4_t = simd_add(a, vrshrq_n_u32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"] #[doc = "## Safety"] @@ -88503,7 +85438,6 @@ pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x static_assert!(N >= 1 && N <= 64); simd_add(a, vrshr_n_u64::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] #[doc = "## Safety"] @@ -88530,7 +85464,6 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 static_assert!(N >= 1 && N <= 64); simd_add(a, vrshrq_n_u64::(b)) } - #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] #[doc = "## Safety"] @@ -88560,7 +85493,6 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 let ret_val: uint64x2_t = simd_add(a, vrshrq_n_u64::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] #[doc = "## Safety"] @@ -88583,7 +85515,7 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -88593,7 +85525,6 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { } _vrsubhn_s16(a, b) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] #[doc = "## Safety"] @@ -88616,7 +85547,7 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -88629,7 +85560,6 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = _vrsubhn_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] #[doc = "## Safety"] @@ -88652,7 +85582,7 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -88662,7 +85592,6 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { } _vrsubhn_s32(a, b) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] #[doc = "## Safety"] @@ -88685,7 +85614,7 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -88698,7 +85627,6 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = _vrsubhn_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] #[doc = "## Safety"] @@ -88721,7 +85649,7 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -88731,7 +85659,6 @@ pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { } _vrsubhn_s64(a, b) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] #[doc = "## Safety"] @@ -88754,7 +85681,7 @@ pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -88767,7 +85694,6 @@ pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = _vrsubhn_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] #[doc = "## Safety"] @@ -88792,7 +85718,6 @@ pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vrsubhn_s16(transmute(a), transmute(b))) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] #[doc = "## Safety"] @@ -88820,7 +85745,6 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vrsubhn_s16(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] #[doc = "## Safety"] @@ -88845,7 +85769,6 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vrsubhn_s32(transmute(a), transmute(b))) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] #[doc = "## Safety"] @@ -88873,7 +85796,6 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = transmute(vrsubhn_s32(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] #[doc = "## Safety"] @@ -88898,7 +85820,6 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vrsubhn_s64(transmute(a), transmute(b))) } - #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] #[doc = "## Safety"] @@ -88926,7 +85847,6 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = transmute(vrsubhn_s64(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [1, 0]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] #[doc = "## Safety"] @@ -88953,7 +85873,6 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x static_assert_uimm_bits!(LANE, 1); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] #[doc = "## Safety"] @@ -88982,7 +85901,6 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x let ret_val: float32x2_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] #[doc = "## Safety"] @@ -89009,7 +85927,6 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32 static_assert_uimm_bits!(LANE, 2); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] #[doc = "## Safety"] @@ -89038,7 +85955,6 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32 let ret_val: float32x4_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] #[doc = "## Safety"] @@ -89065,7 +85981,6 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] #[doc = "## Safety"] @@ -89094,7 +86009,6 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] #[doc = "## Safety"] @@ -89121,7 +86035,6 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(LANE, 4); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] #[doc = "## Safety"] @@ -89154,7 +86067,6 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] #[doc = "## Safety"] @@ -89181,7 +86093,6 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t static_assert_uimm_bits!(LANE, 2); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] #[doc = "## Safety"] @@ -89210,7 +86121,6 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t let ret_val: int16x4_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] #[doc = "## Safety"] @@ -89237,7 +86147,6 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t static_assert_uimm_bits!(LANE, 3); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] #[doc = "## Safety"] @@ -89266,7 +86175,6 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t let ret_val: int16x8_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] #[doc = "## Safety"] @@ -89293,7 +86201,6 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t static_assert_uimm_bits!(LANE, 1); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] #[doc = "## Safety"] @@ -89322,7 +86229,6 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t let ret_val: int32x2_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] #[doc = "## Safety"] @@ -89349,7 +86255,6 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t static_assert_uimm_bits!(LANE, 2); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] #[doc = "## Safety"] @@ -89378,7 +86283,6 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t let ret_val: int32x4_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] #[doc = "## Safety"] @@ -89405,7 +86309,6 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t static_assert_uimm_bits!(LANE, 1); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] #[doc = "## Safety"] @@ -89434,7 +86337,6 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t let ret_val: int64x2_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] #[doc = "## Safety"] @@ -89461,7 +86363,6 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] #[doc = "## Safety"] @@ -89490,7 +86391,6 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] #[doc = "## Safety"] @@ -89517,7 +86417,6 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t static_assert_uimm_bits!(LANE, 4); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] #[doc = "## Safety"] @@ -89550,7 +86449,6 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] #[doc = "## Safety"] @@ -89577,7 +86475,6 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ static_assert_uimm_bits!(LANE, 2); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] #[doc = "## Safety"] @@ -89606,7 +86503,6 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ let ret_val: uint16x4_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] #[doc = "## Safety"] @@ -89633,7 +86529,6 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 static_assert_uimm_bits!(LANE, 3); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] #[doc = "## Safety"] @@ -89662,7 +86557,6 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 let ret_val: uint16x8_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] #[doc = "## Safety"] @@ -89689,7 +86583,6 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ static_assert_uimm_bits!(LANE, 1); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] #[doc = "## Safety"] @@ -89718,7 +86611,6 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ let ret_val: uint32x2_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] #[doc = "## Safety"] @@ -89745,7 +86637,6 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 static_assert_uimm_bits!(LANE, 2); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] #[doc = "## Safety"] @@ -89774,7 +86665,6 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 let ret_val: uint32x4_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] #[doc = "## Safety"] @@ -89801,7 +86691,6 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 static_assert_uimm_bits!(LANE, 1); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] #[doc = "## Safety"] @@ -89830,7 +86719,6 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 let ret_val: uint64x2_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] #[doc = "## Safety"] @@ -89857,7 +86745,6 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] #[doc = "## Safety"] @@ -89886,7 +86773,6 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] #[doc = "## Safety"] @@ -89913,7 +86799,6 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t static_assert_uimm_bits!(LANE, 4); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] #[doc = "## Safety"] @@ -89946,7 +86831,6 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] #[doc = "## Safety"] @@ -89973,7 +86857,6 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ static_assert_uimm_bits!(LANE, 2); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] #[doc = "## Safety"] @@ -90002,7 +86885,6 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ let ret_val: poly16x4_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] #[doc = "## Safety"] @@ -90029,7 +86911,6 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 static_assert_uimm_bits!(LANE, 3); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] #[doc = "## Safety"] @@ -90058,7 +86939,6 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 let ret_val: poly16x8_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] #[doc = "## Safety"] @@ -90084,7 +86964,6 @@ pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_ static_assert!(LANE == 0); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] #[doc = "## Safety"] @@ -90110,7 +86989,6 @@ pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t static_assert!(LANE == 0); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] #[doc = "## Safety"] @@ -90136,7 +87014,6 @@ pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_ static_assert!(LANE == 0); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] #[doc = "## Safety"] @@ -90163,7 +87040,6 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 static_assert_uimm_bits!(LANE, 1); simd_insert!(b, LANE as u32, a) } - #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] #[doc = "## Safety"] @@ -90192,7 +87068,6 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 let ret_val: poly64x2_t = simd_insert!(b, LANE as u32, a); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "SHA1 hash update accelerator, choose."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] #[doc = "## Safety"] @@ -90211,7 +87086,7 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1c" @@ -90221,7 +87096,6 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> } _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() } - #[doc = "SHA1 hash update accelerator, choose."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] #[doc = "## Safety"] @@ -90240,7 +87114,7 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1c" @@ -90254,7 +87128,6 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA1 fixed rotate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] #[doc = "## Safety"] @@ -90272,7 +87145,7 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1h" @@ -90282,7 +87155,6 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { } _vsha1h_u32(hash_e.as_signed()).as_unsigned() } - #[doc = "SHA1 hash update accelerator, majority"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] #[doc = "## Safety"] @@ -90301,7 +87173,7 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1m" @@ -90311,7 +87183,6 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> } _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() } - #[doc = "SHA1 hash update accelerator, majority"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] #[doc = "## Safety"] @@ -90330,7 +87201,7 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1m" @@ -90344,7 +87215,6 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA1 hash update accelerator, parity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] #[doc = "## Safety"] @@ -90363,7 +87233,7 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1p" @@ -90373,7 +87243,6 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> } _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() } - #[doc = "SHA1 hash update accelerator, parity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] #[doc = "## Safety"] @@ -90392,7 +87261,7 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1p" @@ -90406,7 +87275,6 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA1 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] #[doc = "## Safety"] @@ -90425,7 +87293,7 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1su0" @@ -90435,7 +87303,6 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_ } _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned() } - #[doc = "SHA1 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] #[doc = "## Safety"] @@ -90454,7 +87321,7 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_ stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1su0" @@ -90469,7 +87336,6 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_ _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA1 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] #[doc = "## Safety"] @@ -90488,7 +87354,7 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_ stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1su1" @@ -90498,7 +87364,6 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t } _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned() } - #[doc = "SHA1 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] #[doc = "## Safety"] @@ -90517,7 +87382,7 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha1su1" @@ -90530,7 +87395,6 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t let ret_val: uint32x4_t = _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA1 schedule update accelerator, upper part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] #[doc = "## Safety"] @@ -90553,7 +87417,7 @@ pub unsafe fn vsha256h2q_u32( hash_efgh: uint32x4_t, wk: uint32x4_t, ) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256h2" @@ -90563,7 +87427,6 @@ pub unsafe fn vsha256h2q_u32( } _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() } - #[doc = "SHA1 schedule update accelerator, upper part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] #[doc = "## Safety"] @@ -90586,7 +87449,7 @@ pub unsafe fn vsha256h2q_u32( hash_efgh: uint32x4_t, wk: uint32x4_t, ) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256h2" @@ -90601,7 +87464,6 @@ pub unsafe fn vsha256h2q_u32( _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA1 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] #[doc = "## Safety"] @@ -90624,7 +87486,7 @@ pub unsafe fn vsha256hq_u32( hash_efgh: uint32x4_t, wk: uint32x4_t, ) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256h" @@ -90634,7 +87496,6 @@ pub unsafe fn vsha256hq_u32( } _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() } - #[doc = "SHA1 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] #[doc = "## Safety"] @@ -90657,7 +87518,7 @@ pub unsafe fn vsha256hq_u32( hash_efgh: uint32x4_t, wk: uint32x4_t, ) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256h" @@ -90672,7 +87533,6 @@ pub unsafe fn vsha256hq_u32( _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA256 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] #[doc = "## Safety"] @@ -90691,7 +87551,7 @@ pub unsafe fn vsha256hq_u32( stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256su0" @@ -90701,7 +87561,6 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t } _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned() } - #[doc = "SHA256 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] #[doc = "## Safety"] @@ -90720,7 +87579,7 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256su0" @@ -90733,7 +87592,6 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t let ret_val: uint32x4_t = _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "SHA256 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] #[doc = "## Safety"] @@ -90756,7 +87614,7 @@ pub unsafe fn vsha256su1q_u32( w8_11: uint32x4_t, w12_15: uint32x4_t, ) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256su1" @@ -90766,7 +87624,6 @@ pub unsafe fn vsha256su1q_u32( } _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned() } - #[doc = "SHA256 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] #[doc = "## Safety"] @@ -90789,7 +87646,7 @@ pub unsafe fn vsha256su1q_u32( w8_11: uint32x4_t, w12_15: uint32x4_t, ) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha256su1" @@ -90804,7 +87661,6 @@ pub unsafe fn vsha256su1q_u32( _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] #[doc = "## Safety"] @@ -90814,13 +87670,12 @@ pub unsafe fn vsha256su1q_u32( #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] fn _vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; } _vshiftins_v16i8(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] #[doc = "## Safety"] @@ -90830,7 +87685,7 @@ unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] fn _vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; } @@ -90844,7 +87699,6 @@ unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v1i64)"] #[doc = "## Safety"] @@ -90853,13 +87707,12 @@ unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] fn _vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; } _vshiftins_v1i64(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i32)"] #[doc = "## Safety"] @@ -90869,13 +87722,12 @@ unsafe fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] fn _vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; } _vshiftins_v2i32(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i32)"] #[doc = "## Safety"] @@ -90885,7 +87737,7 @@ unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] fn _vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; } @@ -90895,7 +87747,6 @@ unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t let ret_val: int32x2_t = _vshiftins_v2i32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i64)"] #[doc = "## Safety"] @@ -90905,13 +87756,12 @@ unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] fn _vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } _vshiftins_v2i64(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i64)"] #[doc = "## Safety"] @@ -90921,7 +87771,7 @@ unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] fn _vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } @@ -90931,7 +87781,6 @@ unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t let ret_val: int64x2_t = _vshiftins_v2i64(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i16)"] #[doc = "## Safety"] @@ -90941,13 +87790,12 @@ unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] fn _vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; } _vshiftins_v4i16(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i16)"] #[doc = "## Safety"] @@ -90957,7 +87805,7 @@ unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] fn _vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; } @@ -90967,7 +87815,6 @@ unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t let ret_val: int16x4_t = _vshiftins_v4i16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i32)"] #[doc = "## Safety"] @@ -90977,13 +87824,12 @@ unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] fn _vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } _vshiftins_v4i32(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i32)"] #[doc = "## Safety"] @@ -90993,7 +87839,7 @@ unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] fn _vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } @@ -91003,7 +87849,6 @@ unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t let ret_val: int32x4_t = _vshiftins_v4i32(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i16)"] #[doc = "## Safety"] @@ -91013,13 +87858,12 @@ unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] fn _vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; } _vshiftins_v8i16(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i16)"] #[doc = "## Safety"] @@ -91029,7 +87873,7 @@ unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] fn _vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; } @@ -91039,7 +87883,6 @@ unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t let ret_val: int16x8_t = _vshiftins_v8i16(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i8)"] #[doc = "## Safety"] @@ -91049,13 +87892,12 @@ unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] fn _vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } _vshiftins_v8i8(a, b, c) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i8)"] #[doc = "## Safety"] @@ -91065,7 +87907,7 @@ unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] fn _vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } @@ -91075,7 +87917,6 @@ unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vshiftins_v8i8(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] #[doc = "## Safety"] @@ -91102,7 +87943,6 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdup_n_s8(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] #[doc = "## Safety"] @@ -91131,7 +87971,6 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shl(a, vdup_n_s8(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] #[doc = "## Safety"] @@ -91158,7 +87997,6 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdupq_n_s8(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] #[doc = "## Safety"] @@ -91191,7 +88029,6 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] #[doc = "## Safety"] @@ -91218,7 +88055,6 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdup_n_s16(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] #[doc = "## Safety"] @@ -91247,7 +88083,6 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shl(a, vdup_n_s16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] #[doc = "## Safety"] @@ -91274,7 +88109,6 @@ pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdupq_n_s16(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] #[doc = "## Safety"] @@ -91303,7 +88137,6 @@ pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shl(a, vdupq_n_s16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] #[doc = "## Safety"] @@ -91330,7 +88163,6 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdup_n_s32(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] #[doc = "## Safety"] @@ -91359,7 +88191,6 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shl(a, vdup_n_s32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] #[doc = "## Safety"] @@ -91386,7 +88217,6 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdupq_n_s32(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] #[doc = "## Safety"] @@ -91415,7 +88245,6 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shl(a, vdupq_n_s32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"] #[doc = "## Safety"] @@ -91441,7 +88270,6 @@ pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdup_n_s64(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] #[doc = "## Safety"] @@ -91468,7 +88296,6 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdupq_n_s64(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] #[doc = "## Safety"] @@ -91497,7 +88324,6 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shl(a, vdupq_n_s64(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] #[doc = "## Safety"] @@ -91524,7 +88350,6 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdup_n_u8(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] #[doc = "## Safety"] @@ -91553,7 +88378,6 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shl(a, vdup_n_u8(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] #[doc = "## Safety"] @@ -91580,7 +88404,6 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdupq_n_u8(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] #[doc = "## Safety"] @@ -91613,7 +88436,6 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] #[doc = "## Safety"] @@ -91640,7 +88462,6 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdup_n_u16(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] #[doc = "## Safety"] @@ -91669,7 +88490,6 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shl(a, vdup_n_u16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] #[doc = "## Safety"] @@ -91696,7 +88516,6 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdupq_n_u16(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] #[doc = "## Safety"] @@ -91725,7 +88544,6 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shl(a, vdupq_n_u16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] #[doc = "## Safety"] @@ -91752,7 +88570,6 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdup_n_u32(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] #[doc = "## Safety"] @@ -91781,7 +88598,6 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shl(a, vdup_n_u32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] #[doc = "## Safety"] @@ -91808,7 +88624,6 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdupq_n_u32(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] #[doc = "## Safety"] @@ -91837,7 +88652,6 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shl(a, vdupq_n_u32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"] #[doc = "## Safety"] @@ -91863,7 +88677,6 @@ pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdup_n_u64(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] #[doc = "## Safety"] @@ -91890,7 +88703,6 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdupq_n_u64(N as _)) } - #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] #[doc = "## Safety"] @@ -91919,7 +88731,6 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shl(a, vdupq_n_u64(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] #[doc = "## Safety"] @@ -91942,7 +88753,7 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -91952,7 +88763,6 @@ pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } _vshl_s8(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] #[doc = "## Safety"] @@ -91975,7 +88785,7 @@ pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -91988,7 +88798,6 @@ pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vshl_s8(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] #[doc = "## Safety"] @@ -92011,7 +88820,7 @@ pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92021,7 +88830,6 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } _vshlq_s8(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] #[doc = "## Safety"] @@ -92044,7 +88852,7 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92061,7 +88869,6 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] #[doc = "## Safety"] @@ -92084,7 +88891,7 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92094,7 +88901,6 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } _vshl_s16(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] #[doc = "## Safety"] @@ -92117,7 +88923,7 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92130,7 +88936,6 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = _vshl_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] #[doc = "## Safety"] @@ -92153,7 +88958,7 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92163,7 +88968,6 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } _vshlq_s16(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] #[doc = "## Safety"] @@ -92186,7 +88990,7 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92199,7 +89003,6 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = _vshlq_s16(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] #[doc = "## Safety"] @@ -92222,7 +89025,7 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92232,7 +89035,6 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } _vshl_s32(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] #[doc = "## Safety"] @@ -92255,7 +89057,7 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92268,7 +89070,6 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = _vshl_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] #[doc = "## Safety"] @@ -92291,7 +89092,7 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92301,7 +89102,6 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } _vshlq_s32(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] #[doc = "## Safety"] @@ -92324,7 +89124,7 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92337,7 +89137,6 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = _vshlq_s32(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] #[doc = "## Safety"] @@ -92359,7 +89158,7 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92369,7 +89168,6 @@ pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } _vshl_s64(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] #[doc = "## Safety"] @@ -92392,7 +89190,7 @@ pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92402,7 +89200,6 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } _vshlq_s64(a, b) } - #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] #[doc = "## Safety"] @@ -92425,7 +89222,7 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92438,7 +89235,6 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = _vshlq_s64(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] #[doc = "## Safety"] @@ -92461,7 +89257,7 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92471,7 +89267,6 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } _vshl_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] #[doc = "## Safety"] @@ -92494,7 +89289,7 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92507,7 +89302,6 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = _vshl_u8(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] #[doc = "## Safety"] @@ -92530,7 +89324,7 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92540,7 +89334,6 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } _vshlq_u8(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] #[doc = "## Safety"] @@ -92563,7 +89356,7 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92580,7 +89373,6 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] #[doc = "## Safety"] @@ -92603,7 +89395,7 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92613,7 +89405,6 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } _vshl_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] #[doc = "## Safety"] @@ -92636,7 +89427,7 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92649,7 +89440,6 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = _vshl_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] #[doc = "## Safety"] @@ -92672,7 +89462,7 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92682,7 +89472,6 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } _vshlq_u16(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] #[doc = "## Safety"] @@ -92705,7 +89494,7 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92718,7 +89507,6 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = _vshlq_u16(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] #[doc = "## Safety"] @@ -92741,7 +89529,7 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92751,7 +89539,6 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } _vshl_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] #[doc = "## Safety"] @@ -92774,7 +89561,7 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92787,7 +89574,6 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = _vshl_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] #[doc = "## Safety"] @@ -92810,7 +89596,7 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92820,7 +89606,6 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } _vshlq_u32(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] #[doc = "## Safety"] @@ -92843,7 +89628,7 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92856,7 +89641,6 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = _vshlq_u32(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] #[doc = "## Safety"] @@ -92878,7 +89662,7 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92888,7 +89672,6 @@ pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } _vshl_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] #[doc = "## Safety"] @@ -92911,7 +89694,7 @@ pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92921,7 +89704,6 @@ pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } _vshlq_u64(a.as_signed(), b).as_unsigned() } - #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] #[doc = "## Safety"] @@ -92944,7 +89726,7 @@ pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -92957,7 +89739,6 @@ pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = _vshlq_u64(a.as_signed(), b).as_unsigned(); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] #[doc = "## Safety"] @@ -92984,7 +89765,6 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { static_assert!(N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_s32(N as _)) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] #[doc = "## Safety"] @@ -93013,7 +89793,6 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shl(simd_cast(a), vdupq_n_s32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] #[doc = "## Safety"] @@ -93040,7 +89819,6 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { static_assert!(N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_s64(N as _)) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] #[doc = "## Safety"] @@ -93069,7 +89847,6 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shl(simd_cast(a), vdupq_n_s64(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] #[doc = "## Safety"] @@ -93096,7 +89873,6 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { static_assert!(N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_s16(N as _)) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] #[doc = "## Safety"] @@ -93125,7 +89901,6 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shl(simd_cast(a), vdupq_n_s16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] #[doc = "## Safety"] @@ -93152,7 +89927,6 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { static_assert!(N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_u32(N as _)) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] #[doc = "## Safety"] @@ -93181,7 +89955,6 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shl(simd_cast(a), vdupq_n_u32(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] #[doc = "## Safety"] @@ -93208,7 +89981,6 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { static_assert!(N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_u64(N as _)) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] #[doc = "## Safety"] @@ -93237,7 +90009,6 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shl(simd_cast(a), vdupq_n_u64(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] #[doc = "## Safety"] @@ -93264,7 +90035,6 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { static_assert!(N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_u16(N as _)) } - #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] #[doc = "## Safety"] @@ -93293,7 +90063,6 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shl(simd_cast(a), vdupq_n_u16(N as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] #[doc = "## Safety"] @@ -93321,7 +90090,6 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { let n: i32 = if N == 8 { 7 } else { N }; simd_shr(a, vdup_n_s8(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] #[doc = "## Safety"] @@ -93351,7 +90119,6 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_shr(a, vdup_n_s8(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] #[doc = "## Safety"] @@ -93379,7 +90146,6 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { let n: i32 = if N == 8 { 7 } else { N }; simd_shr(a, vdupq_n_s8(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] #[doc = "## Safety"] @@ -93413,7 +90179,6 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] #[doc = "## Safety"] @@ -93441,7 +90206,6 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { let n: i32 = if N == 16 { 15 } else { N }; simd_shr(a, vdup_n_s16(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] #[doc = "## Safety"] @@ -93471,7 +90235,6 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_shr(a, vdup_n_s16(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] #[doc = "## Safety"] @@ -93499,7 +90262,6 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { let n: i32 = if N == 16 { 15 } else { N }; simd_shr(a, vdupq_n_s16(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] #[doc = "## Safety"] @@ -93529,7 +90291,6 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_shr(a, vdupq_n_s16(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] #[doc = "## Safety"] @@ -93557,7 +90318,6 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { let n: i32 = if N == 32 { 31 } else { N }; simd_shr(a, vdup_n_s32(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] #[doc = "## Safety"] @@ -93587,7 +90347,6 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_shr(a, vdup_n_s32(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] #[doc = "## Safety"] @@ -93615,7 +90374,6 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { let n: i32 = if N == 32 { 31 } else { N }; simd_shr(a, vdupq_n_s32(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] #[doc = "## Safety"] @@ -93645,7 +90403,6 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_shr(a, vdupq_n_s32(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"] #[doc = "## Safety"] @@ -93672,7 +90429,6 @@ pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { let n: i32 = if N == 64 { 63 } else { N }; simd_shr(a, vdup_n_s64(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] #[doc = "## Safety"] @@ -93700,7 +90456,6 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { let n: i32 = if N == 64 { 63 } else { N }; simd_shr(a, vdupq_n_s64(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] #[doc = "## Safety"] @@ -93730,7 +90485,6 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_shr(a, vdupq_n_s64(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] #[doc = "## Safety"] @@ -93762,7 +90516,6 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { }; simd_shr(a, vdup_n_u8(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] #[doc = "## Safety"] @@ -93796,7 +90549,6 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_shr(a, vdup_n_u8(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] #[doc = "## Safety"] @@ -93828,7 +90580,6 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { }; simd_shr(a, vdupq_n_u8(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] #[doc = "## Safety"] @@ -93866,7 +90617,6 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] #[doc = "## Safety"] @@ -93898,7 +90648,6 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { }; simd_shr(a, vdup_n_u16(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] #[doc = "## Safety"] @@ -93932,7 +90681,6 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_shr(a, vdup_n_u16(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] #[doc = "## Safety"] @@ -93964,7 +90712,6 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { }; simd_shr(a, vdupq_n_u16(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] #[doc = "## Safety"] @@ -93998,7 +90745,6 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_shr(a, vdupq_n_u16(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] #[doc = "## Safety"] @@ -94030,7 +90776,6 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { }; simd_shr(a, vdup_n_u32(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] #[doc = "## Safety"] @@ -94064,7 +90809,6 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_shr(a, vdup_n_u32(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] #[doc = "## Safety"] @@ -94096,7 +90840,6 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { }; simd_shr(a, vdupq_n_u32(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] #[doc = "## Safety"] @@ -94130,7 +90873,6 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_shr(a, vdupq_n_u32(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"] #[doc = "## Safety"] @@ -94161,7 +90903,6 @@ pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { }; simd_shr(a, vdup_n_u64(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] #[doc = "## Safety"] @@ -94193,7 +90934,6 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { }; simd_shr(a, vdupq_n_u64(n as _)) } - #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] #[doc = "## Safety"] @@ -94227,7 +90967,6 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_shr(a, vdupq_n_u64(n as _)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] #[doc = "## Safety"] @@ -94254,7 +90993,6 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_s16(N as _))) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] #[doc = "## Safety"] @@ -94283,7 +91021,6 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_cast(simd_shr(a, vdupq_n_s16(N as _))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] #[doc = "## Safety"] @@ -94310,7 +91047,6 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_s32(N as _))) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] #[doc = "## Safety"] @@ -94339,7 +91075,6 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_cast(simd_shr(a, vdupq_n_s32(N as _))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] #[doc = "## Safety"] @@ -94366,7 +91101,6 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_s64(N as _))) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] #[doc = "## Safety"] @@ -94395,7 +91129,6 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_cast(simd_shr(a, vdupq_n_s64(N as _))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] #[doc = "## Safety"] @@ -94422,7 +91155,6 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_u16(N as _))) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] #[doc = "## Safety"] @@ -94451,7 +91183,6 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_cast(simd_shr(a, vdupq_n_u16(N as _))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] #[doc = "## Safety"] @@ -94478,7 +91209,6 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_u32(N as _))) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] #[doc = "## Safety"] @@ -94507,7 +91237,6 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_cast(simd_shr(a, vdupq_n_u32(N as _))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] #[doc = "## Safety"] @@ -94534,7 +91263,6 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_u64(N as _))) } - #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] #[doc = "## Safety"] @@ -94563,7 +91291,6 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_cast(simd_shr(a, vdupq_n_u64(N as _))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[doc = "## Safety"] @@ -94579,7 +91306,6 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); vshiftins_v8i8(a, b, int8x8_t::splat(N as i8)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[doc = "## Safety"] @@ -94598,7 +91324,6 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vshiftins_v8i8(a, b, int8x8_t::splat(N as i8)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[doc = "## Safety"] @@ -94614,7 +91339,6 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t static_assert_uimm_bits!(N, 3); vshiftins_v16i8(a, b, int8x16_t::splat(N as i8)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[doc = "## Safety"] @@ -94637,7 +91361,6 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[doc = "## Safety"] @@ -94653,7 +91376,6 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t static_assert_uimm_bits!(N, 4); vshiftins_v4i16(a, b, int16x4_t::splat(N as i16)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[doc = "## Safety"] @@ -94672,7 +91394,6 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t let ret_val: int16x4_t = vshiftins_v4i16(a, b, int16x4_t::splat(N as i16)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[doc = "## Safety"] @@ -94688,7 +91409,6 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t static_assert_uimm_bits!(N, 4); vshiftins_v8i16(a, b, int16x8_t::splat(N as i16)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[doc = "## Safety"] @@ -94707,7 +91427,6 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t let ret_val: int16x8_t = vshiftins_v8i16(a, b, int16x8_t::splat(N as i16)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] #[doc = "## Safety"] @@ -94723,7 +91442,6 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t static_assert!(N >= 0 && N <= 31); vshiftins_v2i32(a, b, int32x2_t::splat(N)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] #[doc = "## Safety"] @@ -94742,7 +91460,6 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t let ret_val: int32x2_t = vshiftins_v2i32(a, b, int32x2_t::splat(N)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] #[doc = "## Safety"] @@ -94758,7 +91475,6 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t static_assert!(N >= 0 && N <= 31); vshiftins_v4i32(a, b, int32x4_t::splat(N)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] #[doc = "## Safety"] @@ -94777,7 +91493,6 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t let ret_val: int32x4_t = vshiftins_v4i32(a, b, int32x4_t::splat(N)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] #[doc = "## Safety"] @@ -94792,7 +91507,6 @@ pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t static_assert!(N >= 0 && N <= 63); vshiftins_v1i64(a, b, int64x1_t::splat(N as i64)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] #[doc = "## Safety"] @@ -94808,7 +91522,6 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t static_assert!(N >= 0 && N <= 63); vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] #[doc = "## Safety"] @@ -94827,7 +91540,6 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t let ret_val: int64x2_t = vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] #[doc = "## Safety"] @@ -94847,7 +91559,6 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { int8x8_t::splat(N as i8), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] #[doc = "## Safety"] @@ -94870,7 +91581,6 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] #[doc = "## Safety"] @@ -94890,7 +91600,6 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 int8x16_t::splat(N as i8), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] #[doc = "## Safety"] @@ -94917,7 +91626,6 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] #[doc = "## Safety"] @@ -94937,7 +91645,6 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 int16x4_t::splat(N as i16), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] #[doc = "## Safety"] @@ -94960,7 +91667,6 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] #[doc = "## Safety"] @@ -94980,7 +91686,6 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x int16x8_t::splat(N as i16), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] #[doc = "## Safety"] @@ -95003,7 +91708,6 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] #[doc = "## Safety"] @@ -95023,7 +91727,6 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 int32x2_t::splat(N as i32), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] #[doc = "## Safety"] @@ -95046,7 +91749,6 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 )); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] #[doc = "## Safety"] @@ -95066,7 +91768,6 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x int32x4_t::splat(N as i32), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] #[doc = "## Safety"] @@ -95089,7 +91790,6 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] #[doc = "## Safety"] @@ -95108,7 +91808,6 @@ pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 int64x1_t::splat(N as i64), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] #[doc = "## Safety"] @@ -95128,7 +91827,6 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x int64x2_t::splat(N as i64), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] #[doc = "## Safety"] @@ -95151,7 +91849,6 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x )); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] #[doc = "## Safety"] @@ -95171,7 +91868,6 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { int8x8_t::splat(N as i8), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] #[doc = "## Safety"] @@ -95194,7 +91890,6 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] #[doc = "## Safety"] @@ -95214,7 +91909,6 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 int8x16_t::splat(N as i8), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] #[doc = "## Safety"] @@ -95241,7 +91935,6 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] @@ -95261,7 +91954,6 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 int16x4_t::splat(N as i16), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] @@ -95284,7 +91976,6 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] @@ -95304,7 +91995,6 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x int16x8_t::splat(N as i16), )) } - #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] @@ -95327,7 +92017,6 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] #[doc = "## Safety"] @@ -95354,7 +92043,6 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vshr_n_s8::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] #[doc = "## Safety"] @@ -95384,7 +92072,6 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_add(a, vshr_n_s8::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] #[doc = "## Safety"] @@ -95411,7 +92098,6 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t static_assert!(N >= 1 && N <= 8); simd_add(a, vshrq_n_s8::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] #[doc = "## Safety"] @@ -95445,7 +92131,6 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] #[doc = "## Safety"] @@ -95472,7 +92157,6 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t static_assert!(N >= 1 && N <= 16); simd_add(a, vshr_n_s16::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] #[doc = "## Safety"] @@ -95502,7 +92186,6 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t let ret_val: int16x4_t = simd_add(a, vshr_n_s16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] #[doc = "## Safety"] @@ -95529,7 +92212,6 @@ pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t static_assert!(N >= 1 && N <= 16); simd_add(a, vshrq_n_s16::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] #[doc = "## Safety"] @@ -95559,7 +92241,6 @@ pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t let ret_val: int16x8_t = simd_add(a, vshrq_n_s16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] #[doc = "## Safety"] @@ -95586,7 +92267,6 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t static_assert!(N >= 1 && N <= 32); simd_add(a, vshr_n_s32::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] #[doc = "## Safety"] @@ -95616,7 +92296,6 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t let ret_val: int32x2_t = simd_add(a, vshr_n_s32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] #[doc = "## Safety"] @@ -95643,7 +92322,6 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t static_assert!(N >= 1 && N <= 32); simd_add(a, vshrq_n_s32::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] #[doc = "## Safety"] @@ -95673,7 +92351,6 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t let ret_val: int32x4_t = simd_add(a, vshrq_n_s32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] #[doc = "## Safety"] @@ -95699,7 +92376,6 @@ pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t static_assert!(N >= 1 && N <= 64); simd_add(a, vshr_n_s64::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] #[doc = "## Safety"] @@ -95726,7 +92402,6 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t static_assert!(N >= 1 && N <= 64); simd_add(a, vshrq_n_s64::(b)) } - #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] #[doc = "## Safety"] @@ -95756,7 +92431,6 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t let ret_val: int64x2_t = simd_add(a, vshrq_n_s64::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] #[doc = "## Safety"] @@ -95783,7 +92457,6 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vshr_n_u8::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] #[doc = "## Safety"] @@ -95813,7 +92486,6 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_add(a, vshr_n_u8::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] #[doc = "## Safety"] @@ -95840,7 +92512,6 @@ pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 static_assert!(N >= 1 && N <= 8); simd_add(a, vshrq_n_u8::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] #[doc = "## Safety"] @@ -95874,7 +92545,6 @@ pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] #[doc = "## Safety"] @@ -95901,7 +92571,6 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 static_assert!(N >= 1 && N <= 16); simd_add(a, vshr_n_u16::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] #[doc = "## Safety"] @@ -95931,7 +92600,6 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 let ret_val: uint16x4_t = simd_add(a, vshr_n_u16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] #[doc = "## Safety"] @@ -95958,7 +92626,6 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x static_assert!(N >= 1 && N <= 16); simd_add(a, vshrq_n_u16::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] #[doc = "## Safety"] @@ -95988,7 +92655,6 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x let ret_val: uint16x8_t = simd_add(a, vshrq_n_u16::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] #[doc = "## Safety"] @@ -96015,7 +92681,6 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 static_assert!(N >= 1 && N <= 32); simd_add(a, vshr_n_u32::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] #[doc = "## Safety"] @@ -96045,7 +92710,6 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 let ret_val: uint32x2_t = simd_add(a, vshr_n_u32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] #[doc = "## Safety"] @@ -96072,7 +92736,6 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x static_assert!(N >= 1 && N <= 32); simd_add(a, vshrq_n_u32::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] #[doc = "## Safety"] @@ -96102,7 +92765,6 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x let ret_val: uint32x4_t = simd_add(a, vshrq_n_u32::(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"] #[doc = "## Safety"] @@ -96128,7 +92790,6 @@ pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 static_assert!(N >= 1 && N <= 64); simd_add(a, vshr_n_u64::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] #[doc = "## Safety"] @@ -96155,7 +92816,6 @@ pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x static_assert!(N >= 1 && N <= 64); simd_add(a, vshrq_n_u64::(b)) } - #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] #[doc = "## Safety"] @@ -96185,7 +92845,6 @@ pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x let ret_val: uint64x2_t = simd_add(a, vshrq_n_u64::(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] @@ -96201,7 +92860,6 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(1 <= N && N <= 8); vshiftins_v8i8(a, b, int8x8_t::splat(-N as i8)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] @@ -96220,7 +92878,6 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vshiftins_v8i8(a, b, int8x8_t::splat(-N as i8)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] @@ -96236,7 +92893,6 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t static_assert!(1 <= N && N <= 8); vshiftins_v16i8(a, b, int8x16_t::splat(-N as i8)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] @@ -96259,7 +92915,6 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] @@ -96275,7 +92930,6 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t static_assert!(1 <= N && N <= 16); vshiftins_v4i16(a, b, int16x4_t::splat(-N as i16)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] @@ -96294,7 +92948,6 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t let ret_val: int16x4_t = vshiftins_v4i16(a, b, int16x4_t::splat(-N as i16)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] @@ -96310,7 +92963,6 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t static_assert!(1 <= N && N <= 16); vshiftins_v8i16(a, b, int16x8_t::splat(-N as i16)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] @@ -96329,7 +92981,6 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t let ret_val: int16x8_t = vshiftins_v8i16(a, b, int16x8_t::splat(-N as i16)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] @@ -96345,7 +92996,6 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t static_assert!(1 <= N && N <= 32); vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] @@ -96364,7 +93014,6 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t let ret_val: int32x2_t = vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] @@ -96380,7 +93029,6 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t static_assert!(1 <= N && N <= 32); vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] @@ -96399,7 +93047,6 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t let ret_val: int32x4_t = vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] #[doc = "## Safety"] @@ -96414,7 +93061,6 @@ pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t static_assert!(1 <= N && N <= 64); vshiftins_v1i64(a, b, int64x1_t::splat(-N as i64)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[doc = "## Safety"] @@ -96430,7 +93076,6 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t static_assert!(1 <= N && N <= 64); vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[doc = "## Safety"] @@ -96449,7 +93094,6 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t let ret_val: int64x2_t = vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] @@ -96469,7 +93113,6 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { int8x8_t::splat(-N as i8), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] @@ -96492,7 +93135,6 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] @@ -96512,7 +93154,6 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 int8x16_t::splat(-N as i8), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] @@ -96539,7 +93180,6 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] @@ -96559,7 +93199,6 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 int16x4_t::splat(-N as i16), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] @@ -96582,7 +93221,6 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] @@ -96602,7 +93240,6 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x int16x8_t::splat(-N as i16), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] @@ -96625,7 +93262,6 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] @@ -96645,7 +93281,6 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 int32x2_t::splat(-N), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] @@ -96668,7 +93303,6 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 )); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] @@ -96688,7 +93322,6 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x int32x4_t::splat(-N), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] @@ -96711,7 +93344,6 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] #[doc = "## Safety"] @@ -96730,7 +93362,6 @@ pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 int64x1_t::splat(-N as i64), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] @@ -96750,7 +93381,6 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x int64x2_t::splat(-N as i64), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] @@ -96773,7 +93403,6 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x )); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] @@ -96793,7 +93422,6 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { int8x8_t::splat(-N as i8), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] @@ -96816,7 +93444,6 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] @@ -96836,7 +93463,6 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 int8x16_t::splat(-N as i8), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] @@ -96863,7 +93489,6 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] @@ -96883,7 +93508,6 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 int16x4_t::splat(-N as i16), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] @@ -96906,7 +93530,6 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] @@ -96926,7 +93549,6 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x int16x8_t::splat(-N as i16), )) } - #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] @@ -96949,7 +93571,6 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x )); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] @@ -96968,7 +93589,6 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] @@ -96988,7 +93608,6 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] @@ -97007,7 +93626,6 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] @@ -97027,7 +93645,6 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] @@ -97042,7 +93659,6 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { vst1_v8i8(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] @@ -97058,7 +93674,6 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); vst1_v8i8(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] @@ -97073,7 +93688,6 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { vst1q_v16i8(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] @@ -97089,7 +93703,6 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); vst1q_v16i8(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] @@ -97104,7 +93717,6 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { vst1_v4i16(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] @@ -97120,7 +93732,6 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); vst1_v4i16(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] @@ -97135,7 +93746,6 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { vst1q_v8i16(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] @@ -97151,7 +93761,6 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); vst1q_v8i16(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] @@ -97166,7 +93775,6 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { vst1_v2i32(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] @@ -97182,7 +93790,6 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); vst1_v2i32(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] @@ -97197,7 +93804,6 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { vst1q_v4i32(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] @@ -97213,7 +93819,6 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); vst1q_v4i32(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] #[doc = "## Safety"] @@ -97227,7 +93832,6 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { vst1_v1i64(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] @@ -97242,7 +93846,6 @@ pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { vst1q_v2i64(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] @@ -97258,7 +93861,6 @@ pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); vst1q_v2i64(ptr as *const i8, a, crate::mem::align_of::() as i32) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] @@ -97277,7 +93879,6 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] @@ -97297,7 +93898,6 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] @@ -97316,7 +93916,6 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] @@ -97336,7 +93935,6 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] @@ -97355,7 +93953,6 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] @@ -97375,7 +93972,6 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] @@ -97394,7 +93990,6 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] @@ -97414,7 +94009,6 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] @@ -97433,7 +94027,6 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] @@ -97453,7 +94046,6 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] @@ -97472,7 +94064,6 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] @@ -97492,7 +94083,6 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] #[doc = "## Safety"] @@ -97510,7 +94100,6 @@ pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] @@ -97529,7 +94118,6 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] @@ -97549,7 +94137,6 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] @@ -97568,7 +94155,6 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] @@ -97588,7 +94174,6 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] @@ -97607,7 +94192,6 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] @@ -97627,7 +94211,6 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] @@ -97646,7 +94229,6 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] @@ -97666,7 +94248,6 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] @@ -97685,7 +94266,6 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] @@ -97705,7 +94285,6 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] #[doc = "## Safety"] @@ -97723,7 +94302,6 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] @@ -97742,7 +94320,6 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] @@ -97762,7 +94339,6 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { crate::mem::align_of::() as i32, ) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] @@ -97774,13 +94350,12 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); } _vst1_f32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] @@ -97792,7 +94367,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); } @@ -97801,7 +94376,6 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst1_f32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] @@ -97813,13 +94387,12 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); } _vst1q_f32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] @@ -97831,7 +94404,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); } @@ -97840,7 +94413,6 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst1q_f32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] @@ -97852,7 +94424,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" @@ -97861,7 +94433,6 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { } _vst1_f32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] @@ -97873,7 +94444,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" @@ -97885,7 +94456,6 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst1_f32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] @@ -97897,7 +94467,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" @@ -97906,7 +94476,6 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { } _vst1q_f32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] @@ -97918,7 +94487,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" @@ -97930,7 +94499,6 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst1q_f32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] #[doc = "## Safety"] @@ -97942,13 +94510,12 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v2f32.p0")] fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); } _vst1_f32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] #[doc = "## Safety"] @@ -97960,7 +94527,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v2f32.p0")] fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); } @@ -97970,7 +94537,6 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst1_f32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] #[doc = "## Safety"] @@ -97982,13 +94548,12 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v4f32.p0")] fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); } _vst1q_f32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] #[doc = "## Safety"] @@ -98000,7 +94565,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[cfg_attr(test, assert_instr(vst1))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v4f32.p0")] fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); } @@ -98010,7 +94575,6 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst1q_f32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] #[doc = "## Safety"] @@ -98022,7 +94586,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" @@ -98031,7 +94595,6 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { } _vst1_f32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] #[doc = "## Safety"] @@ -98043,7 +94606,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" @@ -98056,7 +94619,6 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst1_f32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] #[doc = "## Safety"] @@ -98068,7 +94630,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" @@ -98077,7 +94639,6 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { } _vst1q_f32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] #[doc = "## Safety"] @@ -98089,7 +94650,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" @@ -98102,7 +94663,6 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst1q_f32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] @@ -98114,7 +94674,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32.p0")] fn _vst1_f32_x4( ptr: *mut f32, @@ -98126,7 +94686,6 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { } _vst1_f32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] @@ -98138,7 +94697,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32.p0")] fn _vst1_f32_x4( ptr: *mut f32, @@ -98155,7 +94714,6 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst1_f32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] @@ -98167,7 +94725,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32.p0")] fn _vst1q_f32_x4( ptr: *mut f32, @@ -98179,7 +94737,6 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { } _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] @@ -98191,7 +94748,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32.p0")] fn _vst1q_f32_x4( ptr: *mut f32, @@ -98208,7 +94765,6 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] @@ -98220,7 +94776,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" @@ -98235,7 +94791,6 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { } _vst1_f32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] @@ -98247,7 +94802,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" @@ -98267,7 +94822,6 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst1_f32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] @@ -98279,7 +94833,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" @@ -98294,7 +94848,6 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { } _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] @@ -98306,7 +94859,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" @@ -98326,7 +94879,6 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] #[doc = "## Safety"] @@ -98353,7 +94905,6 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] #[doc = "## Safety"] @@ -98381,7 +94932,6 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] #[doc = "## Safety"] @@ -98408,7 +94958,6 @@ pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] #[doc = "## Safety"] @@ -98436,7 +94985,6 @@ pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] #[doc = "## Safety"] @@ -98463,7 +95011,6 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] #[doc = "## Safety"] @@ -98491,7 +95038,6 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] #[doc = "## Safety"] @@ -98518,7 +95064,6 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { static_assert_uimm_bits!(LANE, 4); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] #[doc = "## Safety"] @@ -98546,7 +95091,6 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] #[doc = "## Safety"] @@ -98573,7 +95117,6 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] #[doc = "## Safety"] @@ -98601,7 +95144,6 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] #[doc = "## Safety"] @@ -98628,7 +95170,6 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] #[doc = "## Safety"] @@ -98656,7 +95197,6 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] #[doc = "## Safety"] @@ -98683,7 +95223,6 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] #[doc = "## Safety"] @@ -98711,7 +95250,6 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] #[doc = "## Safety"] @@ -98738,7 +95276,6 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] #[doc = "## Safety"] @@ -98766,7 +95303,6 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] #[doc = "## Safety"] @@ -98793,7 +95329,6 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] #[doc = "## Safety"] @@ -98821,7 +95356,6 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] #[doc = "## Safety"] @@ -98848,7 +95382,6 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] #[doc = "## Safety"] @@ -98876,7 +95409,6 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] #[doc = "## Safety"] @@ -98903,7 +95435,6 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { static_assert_uimm_bits!(LANE, 4); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] #[doc = "## Safety"] @@ -98931,7 +95462,6 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] #[doc = "## Safety"] @@ -98958,7 +95488,6 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] #[doc = "## Safety"] @@ -98986,7 +95515,6 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] #[doc = "## Safety"] @@ -99013,7 +95541,6 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] #[doc = "## Safety"] @@ -99041,7 +95568,6 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] #[doc = "## Safety"] @@ -99068,7 +95594,6 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] #[doc = "## Safety"] @@ -99096,7 +95621,6 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] #[doc = "## Safety"] @@ -99123,7 +95647,6 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] #[doc = "## Safety"] @@ -99151,7 +95674,6 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] #[doc = "## Safety"] @@ -99178,7 +95700,6 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] #[doc = "## Safety"] @@ -99206,7 +95727,6 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] #[doc = "## Safety"] @@ -99233,7 +95753,6 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] #[doc = "## Safety"] @@ -99261,7 +95780,6 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] #[doc = "## Safety"] @@ -99288,7 +95806,6 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { static_assert_uimm_bits!(LANE, 4); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] #[doc = "## Safety"] @@ -99316,7 +95833,6 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] #[doc = "## Safety"] @@ -99343,7 +95859,6 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] #[doc = "## Safety"] @@ -99371,7 +95886,6 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] #[doc = "## Safety"] @@ -99398,7 +95912,6 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] #[doc = "## Safety"] @@ -99426,7 +95939,6 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"] #[doc = "## Safety"] @@ -99452,7 +95964,6 @@ pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { static_assert!(LANE == 0); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"] #[doc = "## Safety"] @@ -99478,7 +95989,6 @@ pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { static_assert!(LANE == 0); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"] #[doc = "## Safety"] @@ -99504,7 +96014,6 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { static_assert!(LANE == 0); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"] #[doc = "## Safety"] @@ -99528,7 +96037,6 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)"] #[doc = "## Safety"] @@ -99552,7 +96060,6 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] #[doc = "## Safety"] @@ -99576,7 +96083,6 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] #[doc = "## Safety"] @@ -99601,7 +96107,6 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] #[doc = "## Safety"] @@ -99629,7 +96134,6 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst1q_s64_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] #[doc = "## Safety"] @@ -99654,7 +96158,6 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] #[doc = "## Safety"] @@ -99683,7 +96186,6 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst1q_s64_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] #[doc = "## Safety"] @@ -99708,7 +96210,6 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] #[doc = "## Safety"] @@ -99738,7 +96239,6 @@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst1q_s64_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] @@ -99750,7 +96250,7 @@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" @@ -99759,7 +96259,6 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { } _vst1_s8_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] @@ -99771,7 +96270,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" @@ -99783,7 +96282,6 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1_s8_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] @@ -99795,7 +96293,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" @@ -99804,7 +96302,6 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { } _vst1q_s8_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] @@ -99816,7 +96313,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" @@ -99836,7 +96333,6 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { ); _vst1q_s8_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] @@ -99848,7 +96344,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" @@ -99857,7 +96353,6 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { } _vst1_s16_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] @@ -99869,7 +96364,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" @@ -99881,7 +96376,6 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst1_s16_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] @@ -99893,7 +96387,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" @@ -99902,7 +96396,6 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { } _vst1q_s16_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] @@ -99914,7 +96407,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" @@ -99926,7 +96419,6 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1q_s16_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] @@ -99938,7 +96430,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" @@ -99947,7 +96439,6 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { } _vst1_s32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] @@ -99959,7 +96450,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" @@ -99971,7 +96462,6 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst1_s32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] @@ -99983,7 +96473,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" @@ -99992,7 +96482,6 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { } _vst1q_s32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] @@ -100004,7 +96493,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" @@ -100016,7 +96505,6 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst1q_s32_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] #[doc = "## Safety"] @@ -100027,7 +96515,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v1i64.p0i64" @@ -100036,7 +96524,6 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { } _vst1_s64_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] @@ -100048,7 +96535,7 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" @@ -100057,7 +96544,6 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { } _vst1q_s64_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] @@ -100069,7 +96555,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" @@ -100081,7 +96567,6 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst1q_s64_x2(b.0, b.1, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] @@ -100093,13 +96578,12 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")] fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); } _vst1_s8_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] @@ -100111,7 +96595,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")] fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); } @@ -100120,7 +96604,6 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1_s8_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] @@ -100132,13 +96615,12 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")] fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); } _vst1q_s8_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] @@ -100150,7 +96632,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")] fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); } @@ -100167,7 +96649,6 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { ); _vst1q_s8_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] @@ -100179,13 +96660,12 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")] fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); } _vst1_s16_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] @@ -100197,7 +96677,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")] fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); } @@ -100206,7 +96686,6 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst1_s16_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] @@ -100218,13 +96697,12 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")] fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); } _vst1q_s16_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] @@ -100236,7 +96714,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")] fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); } @@ -100245,7 +96723,6 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1q_s16_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] @@ -100257,13 +96734,12 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")] fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); } _vst1_s32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] @@ -100275,7 +96751,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")] fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); } @@ -100284,7 +96760,6 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst1_s32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] @@ -100296,13 +96771,12 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")] fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); } _vst1q_s32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] @@ -100314,7 +96788,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")] fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); } @@ -100323,7 +96797,6 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst1q_s32_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] #[doc = "## Safety"] @@ -100334,13 +96807,12 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v1i64.p0")] fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t); } _vst1_s64_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] @@ -100352,13 +96824,12 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); } _vst1q_s64_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] @@ -100370,7 +96841,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); } @@ -100379,7 +96850,6 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst1q_s64_x2(a, b.0, b.1) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] @@ -100391,7 +96861,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" @@ -100400,7 +96870,6 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { } _vst1_s8_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] @@ -100412,7 +96881,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" @@ -100425,7 +96894,6 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1_s8_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] @@ -100437,7 +96905,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" @@ -100446,7 +96914,6 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { } _vst1q_s8_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] @@ -100458,7 +96925,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" @@ -100483,7 +96950,6 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { ); _vst1q_s8_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] @@ -100495,7 +96961,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" @@ -100504,7 +96970,6 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { } _vst1_s16_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] @@ -100516,7 +96981,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" @@ -100529,7 +96994,6 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst1_s16_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] @@ -100541,7 +97005,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" @@ -100550,7 +97014,6 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { } _vst1q_s16_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] @@ -100562,7 +97025,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" @@ -100575,7 +97038,6 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1q_s16_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] @@ -100587,7 +97049,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" @@ -100596,7 +97058,6 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { } _vst1_s32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] @@ -100608,7 +97069,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" @@ -100621,7 +97082,6 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst1_s32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] @@ -100633,7 +97093,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" @@ -100642,7 +97102,6 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { } _vst1q_s32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] @@ -100654,7 +97113,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" @@ -100667,7 +97126,6 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst1q_s32_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] #[doc = "## Safety"] @@ -100678,7 +97136,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v1i64.p0i64" @@ -100687,7 +97145,6 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { } _vst1_s64_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] @@ -100699,7 +97156,7 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" @@ -100708,7 +97165,6 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { } _vst1q_s64_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] @@ -100720,7 +97176,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" @@ -100733,7 +97189,6 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst1q_s64_x3(b.0, b.1, b.2, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] @@ -100745,13 +97200,12 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8.p0")] fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); } _vst1_s8_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] @@ -100763,7 +97217,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8.p0")] fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); } @@ -100773,7 +97227,6 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1_s8_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] @@ -100785,13 +97238,12 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8.p0")] fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); } _vst1q_s8_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] @@ -100803,7 +97255,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8.p0")] fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); } @@ -100825,7 +97277,6 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { ); _vst1q_s8_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] @@ -100837,13 +97288,12 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16.p0")] fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); } _vst1_s16_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] @@ -100855,7 +97305,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16.p0")] fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); } @@ -100865,7 +97315,6 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst1_s16_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] @@ -100877,13 +97326,12 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16.p0")] fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); } _vst1q_s16_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] @@ -100895,7 +97343,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16.p0")] fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); } @@ -100905,7 +97353,6 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1q_s16_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] @@ -100917,13 +97364,12 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32.p0")] fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); } _vst1_s32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] @@ -100935,7 +97381,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32.p0")] fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); } @@ -100945,7 +97391,6 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst1_s32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] @@ -100957,13 +97402,12 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32.p0")] fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); } _vst1q_s32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] @@ -100975,7 +97419,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32.p0")] fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); } @@ -100985,7 +97429,6 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst1q_s32_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] #[doc = "## Safety"] @@ -100996,13 +97439,12 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64.p0")] fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); } _vst1_s64_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] @@ -101014,13 +97456,12 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64.p0")] fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); } _vst1q_s64_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] @@ -101032,7 +97473,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64.p0")] fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); } @@ -101042,7 +97483,6 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst1q_s64_x3(a, b.0, b.1, b.2) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] @@ -101054,7 +97494,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" @@ -101063,7 +97503,6 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { } _vst1_s8_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] @@ -101075,7 +97514,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" @@ -101089,7 +97528,6 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1_s8_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] @@ -101101,7 +97539,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" @@ -101110,7 +97548,6 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { } _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] @@ -101122,7 +97559,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" @@ -101152,7 +97589,6 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { ); _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] @@ -101164,7 +97600,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" @@ -101173,7 +97609,6 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { } _vst1_s16_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] @@ -101185,7 +97620,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" @@ -101199,7 +97634,6 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst1_s16_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] @@ -101211,7 +97645,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" @@ -101220,7 +97654,6 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { } _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] @@ -101232,7 +97665,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" @@ -101246,7 +97679,6 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] @@ -101258,7 +97690,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" @@ -101267,7 +97699,6 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { } _vst1_s32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] @@ -101279,7 +97710,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" @@ -101293,7 +97724,6 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst1_s32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] @@ -101305,7 +97735,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" @@ -101314,7 +97744,6 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { } _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] @@ -101326,7 +97755,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" @@ -101340,7 +97769,6 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] @@ -101351,7 +97779,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v1i64.p0i64" @@ -101360,7 +97788,6 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { } _vst1_s64_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] @@ -101372,7 +97799,7 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" @@ -101381,7 +97808,6 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { } _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] @@ -101393,7 +97819,7 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" @@ -101407,7 +97833,6 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] @@ -101419,13 +97844,12 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8.p0")] fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); } _vst1_s8_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] @@ -101437,7 +97861,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8.p0")] fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); } @@ -101448,7 +97872,6 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1_s8_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] @@ -101460,13 +97883,12 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8.p0")] fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); } _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] @@ -101478,7 +97900,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8.p0")] fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); } @@ -101505,7 +97927,6 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { ); _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] @@ -101517,13 +97938,12 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16.p0")] fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); } _vst1_s16_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] @@ -101535,7 +97955,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16.p0")] fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); } @@ -101546,7 +97966,6 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst1_s16_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] @@ -101558,13 +97977,12 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16.p0")] fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); } _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] @@ -101576,7 +97994,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16.p0")] fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); } @@ -101587,7 +98005,6 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] @@ -101599,13 +98016,12 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32.p0")] fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); } _vst1_s32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] @@ -101617,7 +98033,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32.p0")] fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); } @@ -101628,7 +98044,6 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst1_s32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] @@ -101640,13 +98055,12 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32.p0")] fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); } _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] @@ -101658,7 +98072,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32.p0")] fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); } @@ -101669,7 +98083,6 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] @@ -101680,13 +98093,12 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64.p0")] fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); } _vst1_s64_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] @@ -101698,13 +98110,12 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64.p0")] fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); } _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] @@ -101716,7 +98127,7 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64.p0")] fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); } @@ -101727,7 +98138,6 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] #[doc = "## Safety"] @@ -101752,7 +98162,6 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] #[doc = "## Safety"] @@ -101780,7 +98189,6 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst1_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] #[doc = "## Safety"] @@ -101805,7 +98213,6 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] #[doc = "## Safety"] @@ -101834,7 +98241,6 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst1_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] #[doc = "## Safety"] @@ -101859,7 +98265,6 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] #[doc = "## Safety"] @@ -101889,7 +98294,6 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst1_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] #[doc = "## Safety"] @@ -101914,7 +98318,6 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] #[doc = "## Safety"] @@ -101950,7 +98353,6 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { ); vst1q_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] #[doc = "## Safety"] @@ -101975,7 +98377,6 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] #[doc = "## Safety"] @@ -102016,7 +98417,6 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { ); vst1q_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] #[doc = "## Safety"] @@ -102041,7 +98441,6 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] #[doc = "## Safety"] @@ -102087,7 +98486,6 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { ); vst1q_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] #[doc = "## Safety"] @@ -102112,7 +98510,6 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] #[doc = "## Safety"] @@ -102140,7 +98537,6 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst1_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] #[doc = "## Safety"] @@ -102165,7 +98561,6 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] #[doc = "## Safety"] @@ -102194,7 +98589,6 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst1_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] #[doc = "## Safety"] @@ -102219,7 +98613,6 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] #[doc = "## Safety"] @@ -102249,7 +98642,6 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst1_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] #[doc = "## Safety"] @@ -102274,7 +98666,6 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] #[doc = "## Safety"] @@ -102302,7 +98693,6 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst1q_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] #[doc = "## Safety"] @@ -102327,7 +98717,6 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] #[doc = "## Safety"] @@ -102356,7 +98745,6 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst1q_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] #[doc = "## Safety"] @@ -102381,7 +98769,6 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] #[doc = "## Safety"] @@ -102411,7 +98798,6 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst1q_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] #[doc = "## Safety"] @@ -102436,7 +98822,6 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { vst1_s32_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] #[doc = "## Safety"] @@ -102464,7 +98849,6 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst1_s32_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] #[doc = "## Safety"] @@ -102489,7 +98873,6 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { vst1_s32_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] #[doc = "## Safety"] @@ -102518,7 +98901,6 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst1_s32_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] #[doc = "## Safety"] @@ -102543,7 +98925,6 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { vst1_s32_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] #[doc = "## Safety"] @@ -102573,7 +98954,6 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst1_s32_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] #[doc = "## Safety"] @@ -102598,7 +98978,6 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { vst1q_s32_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] #[doc = "## Safety"] @@ -102626,7 +99005,6 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst1q_s32_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] #[doc = "## Safety"] @@ -102651,7 +99029,6 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { vst1q_s32_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] #[doc = "## Safety"] @@ -102680,7 +99057,6 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst1q_s32_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] #[doc = "## Safety"] @@ -102705,7 +99081,6 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { vst1q_s32_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] #[doc = "## Safety"] @@ -102735,7 +99110,6 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst1q_s32_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] #[doc = "## Safety"] @@ -102759,7 +99133,6 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] #[doc = "## Safety"] @@ -102783,7 +99156,6 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] #[doc = "## Safety"] @@ -102807,7 +99179,6 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] #[doc = "## Safety"] @@ -102832,7 +99203,6 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] #[doc = "## Safety"] @@ -102860,7 +99230,6 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst1q_s64_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] #[doc = "## Safety"] @@ -102885,7 +99254,6 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] #[doc = "## Safety"] @@ -102914,7 +99282,6 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst1q_s64_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] #[doc = "## Safety"] @@ -102939,7 +99306,6 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] #[doc = "## Safety"] @@ -102969,7 +99335,6 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst1q_s64_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] #[doc = "## Safety"] @@ -102994,7 +99359,6 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] #[doc = "## Safety"] @@ -103022,7 +99386,6 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst1_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] #[doc = "## Safety"] @@ -103047,7 +99410,6 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] #[doc = "## Safety"] @@ -103076,7 +99438,6 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst1_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] #[doc = "## Safety"] @@ -103101,7 +99462,6 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] #[doc = "## Safety"] @@ -103131,7 +99491,6 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst1_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] #[doc = "## Safety"] @@ -103156,7 +99515,6 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] #[doc = "## Safety"] @@ -103192,7 +99550,6 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { ); vst1q_s8_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] #[doc = "## Safety"] @@ -103217,7 +99574,6 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] #[doc = "## Safety"] @@ -103258,7 +99614,6 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { ); vst1q_s8_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] #[doc = "## Safety"] @@ -103283,7 +99638,6 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] #[doc = "## Safety"] @@ -103329,7 +99683,6 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { ); vst1q_s8_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] #[doc = "## Safety"] @@ -103354,7 +99707,6 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] #[doc = "## Safety"] @@ -103382,7 +99734,6 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst1_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] #[doc = "## Safety"] @@ -103407,7 +99758,6 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] #[doc = "## Safety"] @@ -103436,7 +99786,6 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst1_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] #[doc = "## Safety"] @@ -103461,7 +99810,6 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] #[doc = "## Safety"] @@ -103491,7 +99839,6 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst1_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] #[doc = "## Safety"] @@ -103516,7 +99863,6 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] #[doc = "## Safety"] @@ -103544,7 +99890,6 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst1q_s16_x2(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] #[doc = "## Safety"] @@ -103569,7 +99914,6 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] #[doc = "## Safety"] @@ -103598,7 +99942,6 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst1q_s16_x3(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] #[doc = "## Safety"] @@ -103623,7 +99966,6 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] #[doc = "## Safety"] @@ -103653,7 +99995,6 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst1q_s16_x4(transmute(a), transmute(b)) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v1i64)"] #[doc = "## Safety"] @@ -103665,13 +100006,12 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v1i64.p0")] fn _vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); } _vst1_v1i64(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2f32)"] #[doc = "## Safety"] @@ -103684,13 +100024,12 @@ unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); } _vst1_v2f32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2f32)"] #[doc = "## Safety"] @@ -103703,14 +100042,13 @@ unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); } let val: float32x2_t = simd_shuffle!(val, val, [0, 1]); _vst1_v2f32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2i32)"] #[doc = "## Safety"] @@ -103723,13 +100061,12 @@ unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); } _vst1_v2i32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2i32)"] #[doc = "## Safety"] @@ -103742,14 +100079,13 @@ unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); } let val: int32x2_t = simd_shuffle!(val, val, [0, 1]); _vst1_v2i32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4i16)"] #[doc = "## Safety"] @@ -103762,13 +100098,12 @@ unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); } _vst1_v4i16(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4i16)"] #[doc = "## Safety"] @@ -103781,14 +100116,13 @@ unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); } let val: int16x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); _vst1_v4i16(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v8i8)"] #[doc = "## Safety"] @@ -103801,13 +100135,12 @@ unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); } _vst1_v8i8(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v8i8)"] #[doc = "## Safety"] @@ -103820,14 +100153,13 @@ unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); } let val: int8x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1_v8i8(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v16i8)"] #[doc = "## Safety"] @@ -103840,13 +100172,12 @@ unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); } _vst1q_v16i8(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v16i8)"] #[doc = "## Safety"] @@ -103859,7 +100190,7 @@ unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); } @@ -103870,7 +100201,6 @@ unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { ); _vst1q_v16i8(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v2i64)"] #[doc = "## Safety"] @@ -103883,13 +100213,12 @@ unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); } _vst1q_v2i64(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v2i64)"] #[doc = "## Safety"] @@ -103902,14 +100231,13 @@ unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); } let val: int64x2_t = simd_shuffle!(val, val, [0, 1]); _vst1q_v2i64(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4f32)"] #[doc = "## Safety"] @@ -103922,13 +100250,12 @@ unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); } _vst1q_v4f32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4f32)"] #[doc = "## Safety"] @@ -103941,14 +100268,13 @@ unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); } let val: float32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); _vst1q_v4f32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4i32)"] #[doc = "## Safety"] @@ -103961,13 +100287,12 @@ unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); } _vst1q_v4i32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4i32)"] #[doc = "## Safety"] @@ -103980,14 +100305,13 @@ unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); } let val: int32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); _vst1q_v4i32(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8i16)"] #[doc = "## Safety"] @@ -104000,13 +100324,12 @@ unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); } _vst1q_v8i16(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8i16)"] #[doc = "## Safety"] @@ -104019,14 +100342,13 @@ unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); } let val: int16x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); _vst1q_v8i16(addr, val, align) } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] #[doc = "## Safety"] @@ -104053,7 +100375,6 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple single-element structures from one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] #[doc = "## Safety"] @@ -104081,7 +100402,6 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); *a = simd_extract!(b, LANE as u32); } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] @@ -104093,7 +100413,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" @@ -104102,7 +100422,6 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { } _vst2_f32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] @@ -104114,7 +100433,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" @@ -104126,7 +100445,6 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_f32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] @@ -104138,7 +100456,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" @@ -104147,7 +100465,6 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { } _vst2q_f32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] @@ -104159,7 +100476,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" @@ -104171,7 +100488,6 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_f32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] @@ -104183,7 +100499,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" @@ -104192,7 +100508,6 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { } _vst2_s8(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] @@ -104204,7 +100519,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" @@ -104216,7 +100531,6 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2_s8(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] @@ -104228,7 +100542,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" @@ -104237,7 +100551,6 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { } _vst2q_s8(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] @@ -104249,7 +100562,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" @@ -104269,7 +100582,6 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { ); _vst2q_s8(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] @@ -104281,7 +100593,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" @@ -104290,7 +100602,6 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { } _vst2_s16(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] @@ -104302,7 +100613,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" @@ -104314,7 +100625,6 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2_s16(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] @@ -104326,7 +100636,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" @@ -104335,7 +100645,6 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { } _vst2q_s16(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] @@ -104347,7 +100656,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" @@ -104359,7 +100668,6 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2q_s16(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] @@ -104371,7 +100679,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" @@ -104380,7 +100688,6 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { } _vst2_s32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] @@ -104392,7 +100699,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" @@ -104404,7 +100711,6 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_s32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] @@ -104416,7 +100722,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" @@ -104425,7 +100731,6 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { } _vst2q_s32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] @@ -104437,7 +100742,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" @@ -104449,7 +100754,6 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_s32(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] @@ -104461,13 +100765,12 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")] fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); } _vst2_f32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] @@ -104479,7 +100782,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")] fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); } @@ -104488,7 +100791,6 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_f32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] @@ -104500,13 +100802,12 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")] fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); } _vst2q_f32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] @@ -104518,7 +100819,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")] fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); } @@ -104527,7 +100828,6 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_f32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] @@ -104539,13 +100839,12 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")] fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); } _vst2_s8(a as _, b.0, b.1, 1) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] @@ -104557,7 +100856,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")] fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); } @@ -104566,7 +100865,6 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2_s8(a as _, b.0, b.1, 1) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] @@ -104578,13 +100876,12 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")] fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); } _vst2q_s8(a as _, b.0, b.1, 1) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] @@ -104596,7 +100893,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")] fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); } @@ -104613,7 +100910,6 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { ); _vst2q_s8(a as _, b.0, b.1, 1) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] @@ -104625,13 +100921,12 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); } _vst2_s16(a as _, b.0, b.1, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] @@ -104643,7 +100938,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); } @@ -104652,7 +100947,6 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2_s16(a as _, b.0, b.1, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] @@ -104664,13 +100958,12 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); } _vst2q_s16(a as _, b.0, b.1, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] @@ -104682,7 +100975,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); } @@ -104691,7 +100984,6 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2q_s16(a as _, b.0, b.1, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] @@ -104703,13 +100995,12 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); } _vst2_s32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] @@ -104721,7 +101012,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); } @@ -104730,7 +101021,6 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_s32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] @@ -104742,13 +101032,12 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } _vst2q_s32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] @@ -104760,7 +101049,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } @@ -104769,7 +101058,6 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_s32(a as _, b.0, b.1, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] @@ -104783,7 +101071,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" @@ -104792,7 +101080,6 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { } _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] @@ -104806,7 +101093,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" @@ -104818,7 +101105,6 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] @@ -104832,7 +101118,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" @@ -104841,7 +101127,6 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { } _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] @@ -104855,7 +101140,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" @@ -104867,7 +101152,6 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] @@ -104881,7 +101165,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" @@ -104890,7 +101174,6 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { } _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] @@ -104904,7 +101187,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" @@ -104916,7 +101199,6 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] @@ -104930,7 +101212,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" @@ -104939,7 +101221,6 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { } _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] @@ -104953,7 +101234,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" @@ -104965,7 +101246,6 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] @@ -104979,7 +101259,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" @@ -104988,7 +101268,6 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { } _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] @@ -105002,7 +101281,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" @@ -105014,7 +101293,6 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] @@ -105028,7 +101306,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" @@ -105037,7 +101315,6 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { } _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] @@ -105051,7 +101328,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" @@ -105063,7 +101340,6 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] @@ -105077,7 +101353,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" @@ -105086,7 +101362,6 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { } _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] @@ -105100,7 +101375,7 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" @@ -105112,7 +101387,6 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] @@ -105126,13 +101400,12 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] @@ -105146,7 +101419,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } @@ -105155,7 +101428,6 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] @@ -105169,13 +101441,12 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); } _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] @@ -105189,7 +101460,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); } @@ -105198,7 +101469,6 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] @@ -105212,13 +101482,12 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); } _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] @@ -105232,7 +101501,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); } @@ -105241,7 +101510,6 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] @@ -105255,13 +101523,12 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); } _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] @@ -105275,7 +101542,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); } @@ -105284,7 +101551,6 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] @@ -105298,13 +101564,12 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); } _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] @@ -105318,7 +101583,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); } @@ -105327,7 +101592,6 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] @@ -105341,13 +101605,12 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); } _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] @@ -105361,7 +101624,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); } @@ -105370,7 +101633,6 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] @@ -105384,13 +101646,12 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); } _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] @@ -105404,7 +101665,7 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); } @@ -105413,7 +101674,6 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] #[doc = "## Safety"] @@ -105440,7 +101700,6 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { static_assert_uimm_bits!(LANE, 3); vst2_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] #[doc = "## Safety"] @@ -105470,7 +101729,6 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] #[doc = "## Safety"] @@ -105497,7 +101755,6 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { static_assert_uimm_bits!(LANE, 2); vst2_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] #[doc = "## Safety"] @@ -105527,7 +101784,6 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst2_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] #[doc = "## Safety"] @@ -105554,7 +101810,6 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { static_assert_uimm_bits!(LANE, 3); vst2q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] #[doc = "## Safety"] @@ -105584,7 +101839,6 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] #[doc = "## Safety"] @@ -105611,7 +101865,6 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { static_assert_uimm_bits!(LANE, 1); vst2_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] #[doc = "## Safety"] @@ -105641,7 +101894,6 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst2_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] #[doc = "## Safety"] @@ -105668,7 +101920,6 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { static_assert_uimm_bits!(LANE, 2); vst2q_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] #[doc = "## Safety"] @@ -105698,7 +101949,6 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst2q_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] #[doc = "## Safety"] @@ -105725,7 +101975,6 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { static_assert_uimm_bits!(LANE, 3); vst2_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] #[doc = "## Safety"] @@ -105755,7 +102004,6 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] #[doc = "## Safety"] @@ -105782,7 +102030,6 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { static_assert_uimm_bits!(LANE, 2); vst2_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] #[doc = "## Safety"] @@ -105812,7 +102059,6 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst2_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] #[doc = "## Safety"] @@ -105839,7 +102085,6 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { static_assert_uimm_bits!(LANE, 3); vst2q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] #[doc = "## Safety"] @@ -105869,7 +102114,6 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] #[doc = "## Safety"] @@ -105893,7 +102137,6 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { vst2_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] @@ -105904,13 +102147,12 @@ pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v1i64.p0")] fn _vst2_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32); } _vst2_s64(a as _, b.0, b.1, 8) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] @@ -105921,7 +102163,7 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v1i64.p0i8" @@ -105930,7 +102172,6 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { } _vst2_s64(b.0, b.1, a as _) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] #[doc = "## Safety"] @@ -105954,7 +102195,6 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { vst2_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] #[doc = "## Safety"] @@ -105979,7 +102219,6 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { vst2_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] #[doc = "## Safety"] @@ -106007,7 +102246,6 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] #[doc = "## Safety"] @@ -106032,7 +102270,6 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { vst2q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] #[doc = "## Safety"] @@ -106068,7 +102305,6 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { ); vst2q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] #[doc = "## Safety"] @@ -106093,7 +102329,6 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { vst2_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] #[doc = "## Safety"] @@ -106121,7 +102356,6 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst2_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] #[doc = "## Safety"] @@ -106146,7 +102380,6 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { vst2q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] #[doc = "## Safety"] @@ -106174,7 +102407,6 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] #[doc = "## Safety"] @@ -106199,7 +102431,6 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { vst2_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] #[doc = "## Safety"] @@ -106227,7 +102458,6 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1]); vst2_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] #[doc = "## Safety"] @@ -106252,7 +102482,6 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { vst2q_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] #[doc = "## Safety"] @@ -106280,7 +102509,6 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst2q_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] #[doc = "## Safety"] @@ -106305,7 +102533,6 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { vst2_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] #[doc = "## Safety"] @@ -106333,7 +102560,6 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] #[doc = "## Safety"] @@ -106358,7 +102584,6 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { vst2q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] #[doc = "## Safety"] @@ -106394,7 +102619,6 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { ); vst2q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] #[doc = "## Safety"] @@ -106419,7 +102643,6 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { vst2_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] #[doc = "## Safety"] @@ -106447,7 +102670,6 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); vst2_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] #[doc = "## Safety"] @@ -106472,7 +102694,6 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { vst2q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 2-element structures from two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] #[doc = "## Safety"] @@ -106500,7 +102721,6 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); vst2q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] @@ -106512,13 +102732,12 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); } _vst3_f32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] @@ -106530,7 +102749,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); } @@ -106540,7 +102759,6 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_f32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] @@ -106552,13 +102770,12 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); } _vst3q_f32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] @@ -106570,7 +102787,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); } @@ -106580,7 +102797,6 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_f32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] @@ -106592,13 +102808,12 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); } _vst3_s8(a as _, b.0, b.1, b.2, 1) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] @@ -106610,7 +102825,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); } @@ -106620,7 +102835,6 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3_s8(a as _, b.0, b.1, b.2, 1) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] @@ -106632,13 +102846,12 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); } _vst3q_s8(a as _, b.0, b.1, b.2, 1) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] @@ -106650,7 +102863,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); } @@ -106672,7 +102885,6 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { ); _vst3q_s8(a as _, b.0, b.1, b.2, 1) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] @@ -106684,13 +102896,12 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); } _vst3_s16(a as _, b.0, b.1, b.2, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] @@ -106702,7 +102913,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); } @@ -106712,7 +102923,6 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3_s16(a as _, b.0, b.1, b.2, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] @@ -106724,13 +102934,12 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); } _vst3q_s16(a as _, b.0, b.1, b.2, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] @@ -106742,7 +102951,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); } @@ -106752,7 +102961,6 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3q_s16(a as _, b.0, b.1, b.2, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] @@ -106764,13 +102972,12 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); } _vst3_s32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] @@ -106782,7 +102989,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); } @@ -106792,7 +102999,6 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_s32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] @@ -106804,13 +103010,12 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); } _vst3q_s32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] @@ -106822,7 +103027,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); } @@ -106832,7 +103037,6 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_s32(a as _, b.0, b.1, b.2, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] @@ -106844,7 +103048,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" @@ -106853,7 +103057,6 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { } _vst3_f32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] @@ -106865,7 +103068,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" @@ -106878,7 +103081,6 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_f32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] @@ -106890,7 +103092,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" @@ -106899,7 +103101,6 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { } _vst3q_f32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] @@ -106911,7 +103112,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" @@ -106924,7 +103125,6 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_f32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] @@ -106936,7 +103136,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" @@ -106945,7 +103145,6 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { } _vst3_s8(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] @@ -106957,7 +103156,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" @@ -106970,7 +103169,6 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3_s8(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] @@ -106982,7 +103180,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" @@ -106991,7 +103189,6 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { } _vst3q_s8(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] @@ -107003,7 +103200,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" @@ -107028,7 +103225,6 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { ); _vst3q_s8(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] @@ -107040,7 +103236,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" @@ -107049,7 +103245,6 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { } _vst3_s16(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] @@ -107061,7 +103256,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" @@ -107074,7 +103269,6 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3_s16(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] @@ -107086,7 +103280,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" @@ -107095,7 +103289,6 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { } _vst3q_s16(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] @@ -107107,7 +103300,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" @@ -107120,7 +103313,6 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3q_s16(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] @@ -107132,7 +103324,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" @@ -107141,7 +103333,6 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { } _vst3_s32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] @@ -107153,7 +103344,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" @@ -107166,7 +103357,6 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_s32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] @@ -107178,7 +103368,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" @@ -107187,7 +103377,6 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { } _vst3q_s32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] @@ -107199,7 +103388,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" @@ -107212,7 +103401,6 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_s32(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] @@ -107226,7 +103414,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] fn _vst3_lane_f32( ptr: *mut i8, @@ -107239,7 +103427,6 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { } _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] @@ -107253,7 +103440,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] fn _vst3_lane_f32( ptr: *mut i8, @@ -107270,7 +103457,6 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] @@ -107284,7 +103470,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] fn _vst3q_lane_f32( ptr: *mut i8, @@ -107297,7 +103483,6 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { } _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] @@ -107311,7 +103496,7 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] fn _vst3q_lane_f32( ptr: *mut i8, @@ -107328,7 +103513,6 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] @@ -107342,13 +103526,12 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); } _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] @@ -107362,7 +103545,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); } @@ -107372,7 +103555,6 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] @@ -107386,7 +103568,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] fn _vst3_lane_s16( ptr: *mut i8, @@ -107399,7 +103581,6 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { } _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] @@ -107413,7 +103594,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] fn _vst3_lane_s16( ptr: *mut i8, @@ -107430,7 +103611,6 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] @@ -107444,7 +103624,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] fn _vst3q_lane_s16( ptr: *mut i8, @@ -107457,7 +103637,6 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { } _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] @@ -107471,7 +103650,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] fn _vst3q_lane_s16( ptr: *mut i8, @@ -107488,7 +103667,6 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] @@ -107502,7 +103680,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] fn _vst3_lane_s32( ptr: *mut i8, @@ -107515,7 +103693,6 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { } _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] @@ -107529,7 +103706,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] fn _vst3_lane_s32( ptr: *mut i8, @@ -107546,7 +103723,6 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] @@ -107560,7 +103736,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] fn _vst3q_lane_s32( ptr: *mut i8, @@ -107573,7 +103749,6 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { } _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] @@ -107587,7 +103762,7 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] fn _vst3q_lane_s32( ptr: *mut i8, @@ -107604,7 +103779,6 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] @@ -107618,7 +103792,7 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" @@ -107627,7 +103801,6 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { } _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] @@ -107641,7 +103814,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" @@ -107654,7 +103827,6 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] @@ -107668,7 +103840,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" @@ -107677,7 +103849,6 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { } _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] @@ -107691,7 +103862,7 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" @@ -107704,7 +103875,6 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] @@ -107718,7 +103888,7 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" @@ -107727,7 +103897,6 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { } _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] @@ -107741,7 +103910,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" @@ -107754,7 +103923,6 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] @@ -107768,7 +103936,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" @@ -107777,7 +103945,6 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { } _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] @@ -107791,7 +103958,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" @@ -107804,7 +103971,6 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] @@ -107818,7 +103984,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" @@ -107827,7 +103993,6 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { } _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] @@ -107841,7 +104006,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" @@ -107854,7 +104019,6 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] @@ -107868,7 +104032,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" @@ -107877,7 +104041,6 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { } _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] @@ -107891,7 +104054,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" @@ -107904,7 +104067,6 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] @@ -107918,7 +104080,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" @@ -107927,7 +104089,6 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { } _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] @@ -107941,7 +104102,7 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" @@ -107954,7 +104115,6 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] #[doc = "## Safety"] @@ -107981,7 +104141,6 @@ pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { static_assert_uimm_bits!(LANE, 3); vst3_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] #[doc = "## Safety"] @@ -108012,7 +104171,6 @@ pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] #[doc = "## Safety"] @@ -108039,7 +104197,6 @@ pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { static_assert_uimm_bits!(LANE, 2); vst3_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] #[doc = "## Safety"] @@ -108070,7 +104227,6 @@ pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst3_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] #[doc = "## Safety"] @@ -108097,7 +104253,6 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { static_assert_uimm_bits!(LANE, 3); vst3q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] #[doc = "## Safety"] @@ -108128,7 +104283,6 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] #[doc = "## Safety"] @@ -108155,7 +104309,6 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { static_assert_uimm_bits!(LANE, 1); vst3_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] #[doc = "## Safety"] @@ -108186,7 +104339,6 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst3_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] #[doc = "## Safety"] @@ -108213,7 +104365,6 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { static_assert_uimm_bits!(LANE, 2); vst3q_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] #[doc = "## Safety"] @@ -108244,7 +104395,6 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst3q_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] #[doc = "## Safety"] @@ -108271,7 +104421,6 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { static_assert_uimm_bits!(LANE, 3); vst3_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] #[doc = "## Safety"] @@ -108302,7 +104451,6 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] #[doc = "## Safety"] @@ -108329,7 +104477,6 @@ pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { static_assert_uimm_bits!(LANE, 2); vst3_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] #[doc = "## Safety"] @@ -108360,7 +104507,6 @@ pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst3_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] #[doc = "## Safety"] @@ -108387,7 +104533,6 @@ pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { static_assert_uimm_bits!(LANE, 3); vst3q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] #[doc = "## Safety"] @@ -108418,7 +104563,6 @@ pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"] #[doc = "## Safety"] @@ -108442,7 +104586,6 @@ pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { vst3_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] #[doc = "## Safety"] @@ -108453,7 +104596,7 @@ pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v1i64.p0i8" @@ -108462,7 +104605,6 @@ pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { } _vst3_s64(b.0, b.1, b.2, a as _) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] #[doc = "## Safety"] @@ -108473,13 +104615,12 @@ pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v1i64")] fn _vst3_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, size: i32); } _vst3_s64(a as _, b.0, b.1, b.2, 8) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] #[doc = "## Safety"] @@ -108503,7 +104644,6 @@ pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { vst3_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] #[doc = "## Safety"] @@ -108528,7 +104668,6 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { vst3_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] #[doc = "## Safety"] @@ -108557,7 +104696,6 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] #[doc = "## Safety"] @@ -108582,7 +104720,6 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { vst3q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] #[doc = "## Safety"] @@ -108623,7 +104760,6 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { ); vst3q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] #[doc = "## Safety"] @@ -108648,7 +104784,6 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { vst3_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] #[doc = "## Safety"] @@ -108677,7 +104812,6 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst3_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] #[doc = "## Safety"] @@ -108702,7 +104836,6 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { vst3q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] #[doc = "## Safety"] @@ -108731,7 +104864,6 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] #[doc = "## Safety"] @@ -108756,7 +104888,6 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { vst3_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] #[doc = "## Safety"] @@ -108785,7 +104916,6 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1]); vst3_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] #[doc = "## Safety"] @@ -108810,7 +104940,6 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { vst3q_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] #[doc = "## Safety"] @@ -108839,7 +104968,6 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst3q_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] #[doc = "## Safety"] @@ -108864,7 +104992,6 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { vst3_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] #[doc = "## Safety"] @@ -108893,7 +105020,6 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] #[doc = "## Safety"] @@ -108918,7 +105044,6 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { vst3q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] #[doc = "## Safety"] @@ -108959,7 +105084,6 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { ); vst3q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] #[doc = "## Safety"] @@ -108984,7 +105108,6 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { vst3_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] #[doc = "## Safety"] @@ -109013,7 +105136,6 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); vst3_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] #[doc = "## Safety"] @@ -109038,7 +105160,6 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { vst3q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] #[doc = "## Safety"] @@ -109067,7 +105188,6 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); vst3q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] @@ -109079,7 +105199,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] fn _vst4_f32( ptr: *mut i8, @@ -109092,7 +105212,6 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { } _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] @@ -109104,7 +105223,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] fn _vst4_f32( ptr: *mut i8, @@ -109122,7 +105241,6 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] @@ -109134,7 +105252,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] fn _vst4q_f32( ptr: *mut i8, @@ -109147,7 +105265,6 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { } _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] @@ -109159,7 +105276,7 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] fn _vst4q_f32( ptr: *mut i8, @@ -109177,7 +105294,6 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] @@ -109189,13 +105305,12 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); } _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] @@ -109207,7 +105322,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); } @@ -109218,7 +105333,6 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] @@ -109230,7 +105344,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] fn _vst4q_s8( ptr: *mut i8, @@ -109243,7 +105357,6 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { } _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] @@ -109255,7 +105368,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] fn _vst4q_s8( ptr: *mut i8, @@ -109289,7 +105402,6 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { ); _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] @@ -109301,7 +105413,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] fn _vst4_s16( ptr: *mut i8, @@ -109314,7 +105426,6 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { } _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] @@ -109326,7 +105437,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] fn _vst4_s16( ptr: *mut i8, @@ -109344,7 +105455,6 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] @@ -109356,7 +105466,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] fn _vst4q_s16( ptr: *mut i8, @@ -109369,7 +105479,6 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { } _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] @@ -109381,7 +105490,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] fn _vst4q_s16( ptr: *mut i8, @@ -109399,7 +105508,6 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] @@ -109411,7 +105519,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] fn _vst4_s32( ptr: *mut i8, @@ -109424,7 +105532,6 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { } _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] @@ -109436,7 +105543,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] fn _vst4_s32( ptr: *mut i8, @@ -109454,7 +105561,6 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] @@ -109466,7 +105572,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] fn _vst4q_s32( ptr: *mut i8, @@ -109479,7 +105585,6 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { } _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] @@ -109491,7 +105596,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] fn _vst4q_s32( ptr: *mut i8, @@ -109509,7 +105614,6 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] @@ -109521,7 +105625,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" @@ -109530,7 +105634,6 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { } _vst4_f32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] @@ -109542,7 +105645,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" @@ -109556,7 +105659,6 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_f32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] @@ -109568,7 +105670,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" @@ -109577,7 +105679,6 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { } _vst4q_f32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] @@ -109589,7 +105690,7 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" @@ -109603,7 +105704,6 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_f32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] @@ -109615,7 +105715,7 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" @@ -109624,7 +105724,6 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { } _vst4_s8(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] @@ -109636,7 +105735,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" @@ -109650,7 +105749,6 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4_s8(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] @@ -109662,7 +105760,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" @@ -109671,7 +105769,6 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { } _vst4q_s8(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] @@ -109683,7 +105780,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" @@ -109713,7 +105810,6 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { ); _vst4q_s8(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] @@ -109725,7 +105821,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" @@ -109734,7 +105830,6 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { } _vst4_s16(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] @@ -109746,7 +105841,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" @@ -109760,7 +105855,6 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4_s16(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] @@ -109772,7 +105866,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" @@ -109781,7 +105875,6 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { } _vst4q_s16(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] @@ -109793,7 +105886,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" @@ -109807,7 +105900,6 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4q_s16(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] @@ -109819,7 +105911,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" @@ -109828,7 +105920,6 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { } _vst4_s32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] @@ -109840,7 +105931,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" @@ -109854,7 +105945,6 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_s32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] @@ -109866,7 +105956,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" @@ -109875,7 +105965,6 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { } _vst4q_s32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] @@ -109887,7 +105976,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" @@ -109901,7 +105990,6 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_s32(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] @@ -109915,7 +106003,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] fn _vst4_lane_f32( ptr: *mut i8, @@ -109929,7 +106017,6 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { } _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] @@ -109943,7 +106030,7 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] fn _vst4_lane_f32( ptr: *mut i8, @@ -109962,7 +106049,6 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] @@ -109976,7 +106062,7 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] fn _vst4q_lane_f32( ptr: *mut i8, @@ -109990,7 +106076,6 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { } _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] @@ -110004,7 +106089,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] fn _vst4q_lane_f32( ptr: *mut i8, @@ -110023,7 +106108,6 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] @@ -110037,7 +106121,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] fn _vst4_lane_s8( ptr: *mut i8, @@ -110051,7 +106135,6 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { } _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] @@ -110065,7 +106148,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] fn _vst4_lane_s8( ptr: *mut i8, @@ -110084,7 +106167,6 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] @@ -110098,7 +106180,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] fn _vst4_lane_s16( ptr: *mut i8, @@ -110112,7 +106194,6 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { } _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] @@ -110126,7 +106207,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] fn _vst4_lane_s16( ptr: *mut i8, @@ -110145,7 +106226,6 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] @@ -110159,7 +106239,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] fn _vst4q_lane_s16( ptr: *mut i8, @@ -110173,7 +106253,6 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { } _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] @@ -110187,7 +106266,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] fn _vst4q_lane_s16( ptr: *mut i8, @@ -110206,7 +106285,6 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] @@ -110220,7 +106298,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] fn _vst4_lane_s32( ptr: *mut i8, @@ -110234,7 +106312,6 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { } _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] @@ -110248,7 +106325,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] fn _vst4_lane_s32( ptr: *mut i8, @@ -110267,7 +106344,6 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] @@ -110281,7 +106357,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] fn _vst4q_lane_s32( ptr: *mut i8, @@ -110295,7 +106371,6 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { } _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] @@ -110309,7 +106384,7 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] fn _vst4q_lane_s32( ptr: *mut i8, @@ -110328,7 +106403,6 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] @@ -110342,7 +106416,7 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" @@ -110358,7 +106432,6 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { } _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] @@ -110372,7 +106445,7 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" @@ -110393,7 +106466,6 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] @@ -110407,7 +106479,7 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" @@ -110423,7 +106495,6 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { } _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] @@ -110437,7 +106508,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" @@ -110458,7 +106529,6 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] @@ -110472,7 +106542,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" @@ -110481,7 +106551,6 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { } _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] @@ -110495,7 +106564,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" @@ -110509,7 +106578,6 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] @@ -110523,7 +106591,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" @@ -110539,7 +106607,6 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { } _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] @@ -110553,7 +106620,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" @@ -110574,7 +106641,6 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] @@ -110588,7 +106654,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" @@ -110604,7 +106670,6 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { } _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] @@ -110618,7 +106683,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { static_assert_uimm_bits!(LANE, 3); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" @@ -110639,7 +106704,6 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] @@ -110653,7 +106717,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" @@ -110669,7 +106733,6 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { } _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] @@ -110683,7 +106746,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { static_assert_uimm_bits!(LANE, 1); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" @@ -110704,7 +106767,6 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] @@ -110718,7 +106780,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" @@ -110734,7 +106796,6 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { } _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] @@ -110748,7 +106809,7 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { static_assert_uimm_bits!(LANE, 2); - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" @@ -110769,7 +106830,6 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] #[doc = "## Safety"] @@ -110796,7 +106856,6 @@ pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { static_assert_uimm_bits!(LANE, 3); vst4_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] #[doc = "## Safety"] @@ -110828,7 +106887,6 @@ pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] #[doc = "## Safety"] @@ -110855,7 +106913,6 @@ pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { static_assert_uimm_bits!(LANE, 2); vst4_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] #[doc = "## Safety"] @@ -110887,7 +106944,6 @@ pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst4_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] #[doc = "## Safety"] @@ -110914,7 +106970,6 @@ pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { static_assert_uimm_bits!(LANE, 3); vst4q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] #[doc = "## Safety"] @@ -110946,7 +107001,6 @@ pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] #[doc = "## Safety"] @@ -110973,7 +107027,6 @@ pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { static_assert_uimm_bits!(LANE, 1); vst4_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] #[doc = "## Safety"] @@ -111005,7 +107058,6 @@ pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst4_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] #[doc = "## Safety"] @@ -111032,7 +107084,6 @@ pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { static_assert_uimm_bits!(LANE, 2); vst4q_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] #[doc = "## Safety"] @@ -111064,7 +107115,6 @@ pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst4q_lane_s32::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] #[doc = "## Safety"] @@ -111091,7 +107141,6 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { static_assert_uimm_bits!(LANE, 3); vst4_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] #[doc = "## Safety"] @@ -111123,7 +107172,6 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4_lane_s8::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] #[doc = "## Safety"] @@ -111150,7 +107198,6 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { static_assert_uimm_bits!(LANE, 2); vst4_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] #[doc = "## Safety"] @@ -111182,7 +107229,6 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst4_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] #[doc = "## Safety"] @@ -111209,7 +107255,6 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { static_assert_uimm_bits!(LANE, 3); vst4q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] #[doc = "## Safety"] @@ -111241,7 +107286,6 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4q_lane_s16::(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"] #[doc = "## Safety"] @@ -111265,7 +107309,6 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { vst4_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] @@ -111276,7 +107319,7 @@ pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v1i64")] fn _vst4_s64( ptr: *mut i8, @@ -111289,7 +107332,6 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { } _vst4_s64(a as _, b.0, b.1, b.2, b.3, 8) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] @@ -111300,7 +107342,7 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v1i64.p0i8" @@ -111309,7 +107351,6 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { } _vst4_s64(b.0, b.1, b.2, b.3, a as _) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"] #[doc = "## Safety"] @@ -111333,7 +107374,6 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { vst4_s64(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] #[doc = "## Safety"] @@ -111358,7 +107398,6 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { vst4_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] #[doc = "## Safety"] @@ -111388,7 +107427,6 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] #[doc = "## Safety"] @@ -111413,7 +107451,6 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { vst4q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] #[doc = "## Safety"] @@ -111459,7 +107496,6 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { ); vst4q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] #[doc = "## Safety"] @@ -111484,7 +107520,6 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { vst4_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] #[doc = "## Safety"] @@ -111514,7 +107549,6 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst4_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] #[doc = "## Safety"] @@ -111539,7 +107573,6 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { vst4q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] #[doc = "## Safety"] @@ -111569,7 +107602,6 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] #[doc = "## Safety"] @@ -111594,7 +107626,6 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { vst4_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] #[doc = "## Safety"] @@ -111624,7 +107655,6 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1]); vst4_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] #[doc = "## Safety"] @@ -111649,7 +107679,6 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { vst4q_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] #[doc = "## Safety"] @@ -111679,7 +107708,6 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst4q_s32(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] #[doc = "## Safety"] @@ -111704,7 +107732,6 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { vst4_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] #[doc = "## Safety"] @@ -111734,7 +107761,6 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] #[doc = "## Safety"] @@ -111759,7 +107785,6 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { vst4q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] #[doc = "## Safety"] @@ -111805,7 +107830,6 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { ); vst4q_s8(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] #[doc = "## Safety"] @@ -111830,7 +107854,6 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { vst4_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] #[doc = "## Safety"] @@ -111860,7 +107883,6 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); vst4_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] #[doc = "## Safety"] @@ -111885,7 +107907,6 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { vst4q_s16(transmute(a), transmute(b)) } - #[doc = "Store multiple 4-element structures from four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] #[doc = "## Safety"] @@ -111915,7 +107936,6 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); vst4q_s16(transmute(a), transmute(b)) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] #[doc = "## Safety"] @@ -111940,7 +107960,6 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] #[doc = "## Safety"] @@ -111968,7 +107987,6 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { let ret_val: float32x2_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] #[doc = "## Safety"] @@ -111993,7 +108011,6 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] #[doc = "## Safety"] @@ -112021,7 +108038,6 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { let ret_val: float32x4_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] #[doc = "## Safety"] @@ -112046,7 +108062,6 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] #[doc = "## Safety"] @@ -112074,7 +108089,6 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] #[doc = "## Safety"] @@ -112099,7 +108113,6 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] #[doc = "## Safety"] @@ -112127,7 +108140,6 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] #[doc = "## Safety"] @@ -112152,7 +108164,6 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] #[doc = "## Safety"] @@ -112180,7 +108191,6 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] #[doc = "## Safety"] @@ -112205,7 +108215,6 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] #[doc = "## Safety"] @@ -112233,7 +108242,6 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] #[doc = "## Safety"] @@ -112258,7 +108266,6 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] #[doc = "## Safety"] @@ -112286,7 +108293,6 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] #[doc = "## Safety"] @@ -112311,7 +108317,6 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] #[doc = "## Safety"] @@ -112339,7 +108344,6 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] #[doc = "## Safety"] @@ -112364,7 +108368,6 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] #[doc = "## Safety"] @@ -112392,7 +108395,6 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] #[doc = "## Safety"] @@ -112417,7 +108419,6 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] #[doc = "## Safety"] @@ -112445,7 +108446,6 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] #[doc = "## Safety"] @@ -112469,7 +108469,6 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] #[doc = "## Safety"] @@ -112494,7 +108493,6 @@ pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] #[doc = "## Safety"] @@ -112522,7 +108520,6 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] #[doc = "## Safety"] @@ -112546,7 +108543,6 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] #[doc = "## Safety"] @@ -112571,7 +108567,6 @@ pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] #[doc = "## Safety"] @@ -112599,7 +108594,6 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] #[doc = "## Safety"] @@ -112624,7 +108618,6 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] #[doc = "## Safety"] @@ -112652,7 +108645,6 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] #[doc = "## Safety"] @@ -112677,7 +108669,6 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] #[doc = "## Safety"] @@ -112709,7 +108700,6 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] #[doc = "## Safety"] @@ -112734,7 +108724,6 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] #[doc = "## Safety"] @@ -112762,7 +108751,6 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_sub(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] #[doc = "## Safety"] @@ -112787,7 +108775,6 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_sub(a, b) } - #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] #[doc = "## Safety"] @@ -112819,7 +108806,6 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] #[doc = "## Safety"] @@ -112845,7 +108831,6 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 let d: int8x8_t = vsubhn_s16(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] #[doc = "## Safety"] @@ -112880,7 +108865,6 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] #[doc = "## Safety"] @@ -112906,7 +108890,6 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 let d: int16x4_t = vsubhn_s32(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] #[doc = "## Safety"] @@ -112936,7 +108919,6 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 let ret_val: int16x8_t = simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] #[doc = "## Safety"] @@ -112962,7 +108944,6 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 let d: int32x2_t = vsubhn_s64(b, c); simd_shuffle!(a, d, [0, 1, 2, 3]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] #[doc = "## Safety"] @@ -112992,7 +108973,6 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 let ret_val: int32x4_t = simd_shuffle!(a, d, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] #[doc = "## Safety"] @@ -113018,7 +108998,6 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin let d: uint8x8_t = vsubhn_u16(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] #[doc = "## Safety"] @@ -113053,7 +109032,6 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] #[doc = "## Safety"] @@ -113079,7 +109057,6 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui let d: uint16x4_t = vsubhn_u32(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] #[doc = "## Safety"] @@ -113109,7 +109086,6 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui let ret_val: uint16x8_t = simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] #[doc = "## Safety"] @@ -113135,7 +109111,6 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui let d: uint32x2_t = vsubhn_u64(b, c); simd_shuffle!(a, d, [0, 1, 2, 3]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] #[doc = "## Safety"] @@ -113165,7 +109140,6 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui let ret_val: uint32x4_t = simd_shuffle!(a, d, [0, 1, 2, 3]); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] #[doc = "## Safety"] @@ -113191,7 +109165,6 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] #[doc = "## Safety"] @@ -113220,7 +109193,6 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let ret_val: int8x8_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] #[doc = "## Safety"] @@ -113246,7 +109218,6 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let c: i32x4 = i32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] #[doc = "## Safety"] @@ -113275,7 +109246,6 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let ret_val: int16x4_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] #[doc = "## Safety"] @@ -113301,7 +109271,6 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let c: i64x2 = i64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] #[doc = "## Safety"] @@ -113330,7 +109299,6 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let ret_val: int32x2_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] #[doc = "## Safety"] @@ -113356,7 +109324,6 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] #[doc = "## Safety"] @@ -113385,7 +109352,6 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] #[doc = "## Safety"] @@ -113411,7 +109377,6 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let c: u32x4 = u32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] #[doc = "## Safety"] @@ -113440,7 +109405,6 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] #[doc = "## Safety"] @@ -113466,7 +109430,6 @@ pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let c: u64x2 = u64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } - #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] #[doc = "## Safety"] @@ -113495,7 +109458,6 @@ pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] #[doc = "## Safety"] @@ -113522,7 +109484,6 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let d: int16x8_t = simd_cast(b); simd_sub(c, d) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] #[doc = "## Safety"] @@ -113552,7 +109513,6 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_sub(c, d); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] #[doc = "## Safety"] @@ -113579,7 +109539,6 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let d: int32x4_t = simd_cast(b); simd_sub(c, d) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] #[doc = "## Safety"] @@ -113609,7 +109568,6 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_sub(c, d); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] #[doc = "## Safety"] @@ -113636,7 +109594,6 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let d: int64x2_t = simd_cast(b); simd_sub(c, d) } - #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] #[doc = "## Safety"] @@ -113666,7 +109623,6 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_sub(c, d); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] #[doc = "## Safety"] @@ -113693,7 +109649,6 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let d: uint16x8_t = simd_cast(b); simd_sub(c, d) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] #[doc = "## Safety"] @@ -113723,7 +109678,6 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_sub(c, d); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] #[doc = "## Safety"] @@ -113750,7 +109704,6 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let d: uint32x4_t = simd_cast(b); simd_sub(c, d) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] #[doc = "## Safety"] @@ -113780,7 +109733,6 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_sub(c, d); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] #[doc = "## Safety"] @@ -113807,7 +109759,6 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let d: uint64x2_t = simd_cast(b); simd_sub(c, d) } - #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] #[doc = "## Safety"] @@ -113837,7 +109788,6 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_sub(c, d); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] #[doc = "## Safety"] @@ -113862,7 +109812,6 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { simd_sub(a, simd_cast(b)) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] #[doc = "## Safety"] @@ -113890,7 +109839,6 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { let ret_val: int16x8_t = simd_sub(a, simd_cast(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] #[doc = "## Safety"] @@ -113915,7 +109863,6 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { simd_sub(a, simd_cast(b)) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] #[doc = "## Safety"] @@ -113943,7 +109890,6 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { let ret_val: int32x4_t = simd_sub(a, simd_cast(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] #[doc = "## Safety"] @@ -113968,7 +109914,6 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { simd_sub(a, simd_cast(b)) } - #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] #[doc = "## Safety"] @@ -113996,7 +109941,6 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { let ret_val: int64x2_t = simd_sub(a, simd_cast(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] #[doc = "## Safety"] @@ -114021,7 +109965,6 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { simd_sub(a, simd_cast(b)) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] #[doc = "## Safety"] @@ -114049,7 +109992,6 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_sub(a, simd_cast(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] #[doc = "## Safety"] @@ -114074,7 +110016,6 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { simd_sub(a, simd_cast(b)) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] #[doc = "## Safety"] @@ -114102,7 +110043,6 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_sub(a, simd_cast(b)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] #[doc = "## Safety"] @@ -114127,7 +110067,6 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { simd_sub(a, simd_cast(b)) } - #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] #[doc = "## Safety"] @@ -114155,7 +110094,6 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { let ret_val: uint64x2_t = simd_sub(a, simd_cast(b)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] #[doc = "## Safety"] @@ -114188,7 +110126,6 @@ pub unsafe fn vsudot_lane_s32( let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vusdot_s32(a, transmute(c), b) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] #[doc = "## Safety"] @@ -114225,7 +110162,6 @@ pub unsafe fn vsudot_lane_s32( let ret_val: int32x2_t = vusdot_s32(a, transmute(c), b); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] #[doc = "## Safety"] @@ -114258,7 +110194,6 @@ pub unsafe fn vsudotq_lane_s32( let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vusdotq_s32(a, transmute(c), b) } - #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] #[doc = "## Safety"] @@ -114295,7 +110230,6 @@ pub unsafe fn vsudotq_lane_s32( let ret_val: int32x4_t = vusdotq_s32(a, transmute(c), b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] #[doc = "## Safety"] @@ -114308,13 +110242,12 @@ pub unsafe fn vsudotq_lane_s32( #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; } _vtbl1(a, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] #[doc = "## Safety"] @@ -114327,7 +110260,7 @@ unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; } @@ -114336,7 +110269,6 @@ unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vtbl1(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[doc = "## Safety"] @@ -114351,7 +110283,6 @@ unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vtbl1(a, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[doc = "## Safety"] @@ -114369,7 +110300,6 @@ pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vtbl1(a, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[doc = "## Safety"] @@ -114384,7 +110314,6 @@ pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { transmute(vtbl1(transmute(a), transmute(b))) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[doc = "## Safety"] @@ -114402,7 +110331,6 @@ pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vtbl1(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[doc = "## Safety"] @@ -114417,7 +110345,6 @@ pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { transmute(vtbl1(transmute(a), transmute(b))) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[doc = "## Safety"] @@ -114435,7 +110362,6 @@ pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vtbl1(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] #[doc = "## Safety"] @@ -114448,13 +110374,12 @@ pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } _vtbl2(a, b, c) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] #[doc = "## Safety"] @@ -114467,7 +110392,7 @@ unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } @@ -114477,7 +110402,6 @@ unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vtbl2(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[doc = "## Safety"] @@ -114492,7 +110416,6 @@ unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { vtbl2(a.0, a.1, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[doc = "## Safety"] @@ -114512,7 +110435,6 @@ pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vtbl2(a.0, a.1, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] @@ -114527,7 +110449,6 @@ pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] @@ -114547,7 +110468,6 @@ pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] @@ -114562,7 +110482,6 @@ pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] @@ -114582,7 +110501,6 @@ pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] #[doc = "## Safety"] @@ -114595,13 +110513,12 @@ pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; } _vtbl3(a, b, c, d) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] #[doc = "## Safety"] @@ -114614,7 +110531,7 @@ unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; } @@ -114625,7 +110542,6 @@ unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t let ret_val: int8x8_t = _vtbl3(a, b, c, d); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] @@ -114640,7 +110556,6 @@ unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { vtbl3(a.0, a.1, a.2, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] @@ -114661,7 +110576,6 @@ pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vtbl3(a.0, a.1, a.2, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] @@ -114681,7 +110595,6 @@ pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { transmute(b), )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] @@ -114707,7 +110620,6 @@ pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] @@ -114727,7 +110639,6 @@ pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { transmute(b), )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] @@ -114753,7 +110664,6 @@ pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] #[doc = "## Safety"] @@ -114766,13 +110676,12 @@ pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; } _vtbl4(a, b, c, d, e) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] #[doc = "## Safety"] @@ -114785,7 +110694,7 @@ unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; } @@ -114797,7 +110706,6 @@ unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) let ret_val: int8x8_t = _vtbl4(a, b, c, d, e); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] @@ -114812,7 +110720,6 @@ unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { vtbl4(a.0, a.1, a.2, a.3, b) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] @@ -114834,7 +110741,6 @@ pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vtbl4(a.0, a.1, a.2, a.3, b); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] @@ -114855,7 +110761,6 @@ pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { transmute(b), )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] @@ -114883,7 +110788,6 @@ pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] @@ -114904,7 +110808,6 @@ pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { transmute(b), )) } - #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] @@ -114932,7 +110835,6 @@ pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] #[doc = "## Safety"] @@ -114944,13 +110846,12 @@ pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } _vtbx1(a, b, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] #[doc = "## Safety"] @@ -114962,7 +110863,7 @@ unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } @@ -114972,7 +110873,6 @@ unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = _vtbx1(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] @@ -114986,7 +110886,6 @@ unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { vtbx1(a, b, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] @@ -115004,7 +110903,6 @@ pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vtbx1(a, b, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] @@ -115018,7 +110916,6 @@ pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] @@ -115036,7 +110933,6 @@ pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] @@ -115050,7 +110946,6 @@ pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] @@ -115068,7 +110963,6 @@ pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { let ret_val: poly8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] #[doc = "## Safety"] @@ -115080,13 +110974,12 @@ pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; } _vtbx2(a, b, c, d) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] #[doc = "## Safety"] @@ -115098,7 +110991,7 @@ unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; } @@ -115109,7 +111002,6 @@ unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t let ret_val: int8x8_t = _vtbx2(a, b, c, d); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] @@ -115123,7 +111015,6 @@ unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { vtbx2(a, b.0, b.1, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] @@ -115143,7 +111034,6 @@ pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vtbx2(a, b.0, b.1, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] @@ -115162,7 +111052,6 @@ pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t transmute(c), )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] @@ -115187,7 +111076,6 @@ pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] @@ -115206,7 +111094,6 @@ pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t transmute(c), )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] @@ -115231,7 +111118,6 @@ pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] #[doc = "## Safety"] @@ -115243,13 +111129,12 @@ pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; } _vtbx3(a, b, c, d, e) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] #[doc = "## Safety"] @@ -115261,7 +111146,7 @@ unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; } @@ -115273,7 +111158,6 @@ unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) let ret_val: int8x8_t = _vtbx3(a, b, c, d, e); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] @@ -115287,7 +111171,6 @@ unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { vtbx3(a, b.0, b.1, b.2, c) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] @@ -115308,7 +111191,6 @@ pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { let ret_val: int8x8_t = vtbx3(a, b.0, b.1, b.2, c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] @@ -115328,7 +111210,6 @@ pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t transmute(c), )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] @@ -115355,7 +111236,6 @@ pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] @@ -115375,7 +111255,6 @@ pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t transmute(c), )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] @@ -115402,7 +111281,6 @@ pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] #[doc = "## Safety"] @@ -115421,7 +111299,7 @@ unsafe fn vtbx4( e: int8x8_t, f: int8x8_t, ) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] fn _vtbx4( a: int8x8_t, @@ -115434,7 +111312,6 @@ unsafe fn vtbx4( } _vtbx4(a, b, c, d, e, f) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] #[doc = "## Safety"] @@ -115453,7 +111330,7 @@ unsafe fn vtbx4( e: int8x8_t, f: int8x8_t, ) -> int8x8_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] fn _vtbx4( a: int8x8_t, @@ -115473,7 +111350,6 @@ unsafe fn vtbx4( let ret_val: int8x8_t = _vtbx4(a, b, c, d, e, f); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] @@ -115494,7 +111370,6 @@ pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { c, ) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] @@ -115523,7 +111398,6 @@ pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { ); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] @@ -115544,7 +111418,6 @@ pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t transmute(c), )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] @@ -115573,7 +111446,6 @@ pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] @@ -115594,7 +111466,6 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t transmute(c), )) } - #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] @@ -115623,7 +111494,6 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t )); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] #[doc = "## Safety"] @@ -115650,7 +111520,6 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] #[doc = "## Safety"] @@ -115682,7 +111551,6 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] #[doc = "## Safety"] @@ -115709,7 +111577,6 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] #[doc = "## Safety"] @@ -115741,7 +111608,6 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] #[doc = "## Safety"] @@ -115768,7 +111634,6 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] #[doc = "## Safety"] @@ -115800,7 +111665,6 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] #[doc = "## Safety"] @@ -115827,7 +111691,6 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] #[doc = "## Safety"] @@ -115859,7 +111722,6 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] #[doc = "## Safety"] @@ -115886,7 +111748,6 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] #[doc = "## Safety"] @@ -115918,7 +111779,6 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] #[doc = "## Safety"] @@ -115953,7 +111813,6 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { ); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] #[doc = "## Safety"] @@ -116001,7 +111860,6 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { ); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] #[doc = "## Safety"] @@ -116028,7 +111886,6 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] #[doc = "## Safety"] @@ -116060,7 +111917,6 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] #[doc = "## Safety"] @@ -116087,7 +111943,6 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] #[doc = "## Safety"] @@ -116119,7 +111974,6 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] #[doc = "## Safety"] @@ -116146,7 +112000,6 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] #[doc = "## Safety"] @@ -116178,7 +112031,6 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] #[doc = "## Safety"] @@ -116205,7 +112057,6 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] #[doc = "## Safety"] @@ -116237,7 +112088,6 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] #[doc = "## Safety"] @@ -116272,7 +112122,6 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { ); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] #[doc = "## Safety"] @@ -116320,7 +112169,6 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { ); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] #[doc = "## Safety"] @@ -116347,7 +112195,6 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] #[doc = "## Safety"] @@ -116379,7 +112226,6 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] #[doc = "## Safety"] @@ -116406,7 +112252,6 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] #[doc = "## Safety"] @@ -116438,7 +112283,6 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] #[doc = "## Safety"] @@ -116465,7 +112309,6 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] #[doc = "## Safety"] @@ -116497,7 +112340,6 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] #[doc = "## Safety"] @@ -116524,7 +112366,6 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] #[doc = "## Safety"] @@ -116556,7 +112397,6 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] #[doc = "## Safety"] @@ -116591,7 +112431,6 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { ); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] #[doc = "## Safety"] @@ -116639,7 +112478,6 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { ); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] #[doc = "## Safety"] @@ -116666,7 +112504,6 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] #[doc = "## Safety"] @@ -116698,7 +112535,6 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] #[doc = "## Safety"] @@ -116725,7 +112561,6 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); transmute((a1, b1)) } - #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] #[doc = "## Safety"] @@ -116757,7 +112592,6 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] #[doc = "## Safety"] @@ -116784,7 +112618,6 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] #[doc = "## Safety"] @@ -116814,7 +112647,6 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] #[doc = "## Safety"] @@ -116841,7 +112673,6 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] #[doc = "## Safety"] @@ -116875,7 +112706,6 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] #[doc = "## Safety"] @@ -116902,7 +112732,6 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let d: i16x4 = i16x4::new(0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] #[doc = "## Safety"] @@ -116932,7 +112761,6 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] #[doc = "## Safety"] @@ -116959,7 +112787,6 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] #[doc = "## Safety"] @@ -116989,7 +112816,6 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] #[doc = "## Safety"] @@ -117016,7 +112842,6 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let d: i32x2 = i32x2::new(0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] #[doc = "## Safety"] @@ -117046,7 +112871,6 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] #[doc = "## Safety"] @@ -117073,7 +112897,6 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let d: i32x4 = i32x4::new(0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] #[doc = "## Safety"] @@ -117103,7 +112926,6 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] #[doc = "## Safety"] @@ -117130,7 +112952,6 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] #[doc = "## Safety"] @@ -117160,7 +112981,6 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] #[doc = "## Safety"] @@ -117187,7 +113007,6 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] #[doc = "## Safety"] @@ -117221,7 +113040,6 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] #[doc = "## Safety"] @@ -117248,7 +113066,6 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { let d: i16x4 = i16x4::new(0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] #[doc = "## Safety"] @@ -117278,7 +113095,6 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] #[doc = "## Safety"] @@ -117305,7 +113121,6 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] #[doc = "## Safety"] @@ -117335,7 +113150,6 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] #[doc = "## Safety"] @@ -117362,7 +113176,6 @@ pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] #[doc = "## Safety"] @@ -117392,7 +113205,6 @@ pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let ret_val: uint8x8_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] #[doc = "## Safety"] @@ -117419,7 +113231,6 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] #[doc = "## Safety"] @@ -117453,7 +113264,6 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] #[doc = "## Safety"] @@ -117480,7 +113290,6 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let d: u16x4 = u16x4::new(0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] #[doc = "## Safety"] @@ -117510,7 +113319,6 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let ret_val: uint16x4_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] #[doc = "## Safety"] @@ -117537,7 +113345,6 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] #[doc = "## Safety"] @@ -117567,7 +113374,6 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let ret_val: uint16x8_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] #[doc = "## Safety"] @@ -117594,7 +113400,6 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let d: u32x2 = u32x2::new(0, 0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] #[doc = "## Safety"] @@ -117624,7 +113429,6 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let ret_val: uint32x2_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] #[doc = "## Safety"] @@ -117651,7 +113455,6 @@ pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let d: u32x4 = u32x4::new(0, 0, 0, 0); simd_ne(c, transmute(d)) } - #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] #[doc = "## Safety"] @@ -117681,7 +113484,6 @@ pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let ret_val: uint32x4_t = simd_ne(c, transmute(d)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] #[doc = "## Safety"] @@ -117714,7 +113516,6 @@ pub unsafe fn vusdot_lane_s32( let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vusdot_s32(a, b, transmute(c)) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] #[doc = "## Safety"] @@ -117751,7 +113552,6 @@ pub unsafe fn vusdot_lane_s32( let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] #[doc = "## Safety"] @@ -117784,7 +113584,6 @@ pub unsafe fn vusdotq_lane_s32( let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vusdotq_s32(a, b, transmute(c)) } - #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] #[doc = "## Safety"] @@ -117821,7 +113620,6 @@ pub unsafe fn vusdotq_lane_s32( let ret_val: int32x4_t = vusdotq_s32(a, b, transmute(c)); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Dot product vector form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] #[doc = "## Safety"] @@ -117844,7 +113642,7 @@ pub unsafe fn vusdotq_lane_s32( unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" @@ -117854,7 +113652,6 @@ pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { } _vusdot_s32(a, b.as_signed(), c) } - #[doc = "Dot product vector form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] #[doc = "## Safety"] @@ -117877,7 +113674,7 @@ pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" @@ -117891,7 +113688,6 @@ pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { let ret_val: int32x2_t = _vusdot_s32(a, b.as_signed(), c); simd_shuffle!(ret_val, ret_val, [0, 1]) } - #[doc = "Dot product vector form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] #[doc = "## Safety"] @@ -117914,7 +113710,7 @@ pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" @@ -117924,7 +113720,6 @@ pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_ } _vusdotq_s32(a, b.as_signed(), c) } - #[doc = "Dot product vector form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] #[doc = "## Safety"] @@ -117947,7 +113742,7 @@ pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_ unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" @@ -117961,7 +113756,6 @@ pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_ let ret_val: int32x4_t = _vusdotq_s32(a, b.as_signed(), c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] #[doc = "## Safety"] @@ -117984,7 +113778,7 @@ pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_ unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" @@ -117994,7 +113788,6 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 } _vusmmlaq_s32(a, b.as_signed(), c) } - #[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] #[doc = "## Safety"] @@ -118017,7 +113810,7 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - extern "unadjusted" { + unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" @@ -118031,7 +113824,6 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 let ret_val: int32x4_t = _vusmmlaq_s32(a, b.as_signed(), c); simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] #[doc = "## Safety"] @@ -118058,7 +113850,6 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] #[doc = "## Safety"] @@ -118090,7 +113881,6 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] #[doc = "## Safety"] @@ -118117,7 +113907,6 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] #[doc = "## Safety"] @@ -118149,7 +113938,6 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] #[doc = "## Safety"] @@ -118176,7 +113964,6 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] #[doc = "## Safety"] @@ -118208,7 +113995,6 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] #[doc = "## Safety"] @@ -118235,7 +114021,6 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] #[doc = "## Safety"] @@ -118267,7 +114052,6 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] #[doc = "## Safety"] @@ -118294,7 +114078,6 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] #[doc = "## Safety"] @@ -118326,7 +114109,6 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] #[doc = "## Safety"] @@ -118361,7 +114143,6 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { ); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] #[doc = "## Safety"] @@ -118409,7 +114190,6 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { ); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] #[doc = "## Safety"] @@ -118436,7 +114216,6 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] #[doc = "## Safety"] @@ -118468,7 +114247,6 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] #[doc = "## Safety"] @@ -118495,7 +114273,6 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] #[doc = "## Safety"] @@ -118527,7 +114304,6 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] #[doc = "## Safety"] @@ -118554,7 +114330,6 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] #[doc = "## Safety"] @@ -118586,7 +114361,6 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] #[doc = "## Safety"] @@ -118613,7 +114387,6 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] #[doc = "## Safety"] @@ -118645,7 +114418,6 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] #[doc = "## Safety"] @@ -118680,7 +114452,6 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { ); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] #[doc = "## Safety"] @@ -118728,7 +114499,6 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { ); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] #[doc = "## Safety"] @@ -118755,7 +114525,6 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] #[doc = "## Safety"] @@ -118787,7 +114556,6 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] #[doc = "## Safety"] @@ -118814,7 +114582,6 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] #[doc = "## Safety"] @@ -118846,7 +114613,6 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] #[doc = "## Safety"] @@ -118873,7 +114639,6 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] #[doc = "## Safety"] @@ -118905,7 +114670,6 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] #[doc = "## Safety"] @@ -118932,7 +114696,6 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] #[doc = "## Safety"] @@ -118964,7 +114727,6 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] #[doc = "## Safety"] @@ -118999,7 +114761,6 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { ); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] #[doc = "## Safety"] @@ -119047,7 +114808,6 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { ); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] #[doc = "## Safety"] @@ -119074,7 +114834,6 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] #[doc = "## Safety"] @@ -119106,7 +114865,6 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] #[doc = "## Safety"] @@ -119133,7 +114891,6 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } - #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] #[doc = "## Safety"] @@ -119165,7 +114922,6 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] #[doc = "## Safety"] @@ -119192,7 +114948,6 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] #[doc = "## Safety"] @@ -119224,7 +114979,6 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] #[doc = "## Safety"] @@ -119251,7 +115005,6 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] #[doc = "## Safety"] @@ -119283,7 +115036,6 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] #[doc = "## Safety"] @@ -119310,7 +115062,6 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] #[doc = "## Safety"] @@ -119342,7 +115093,6 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] #[doc = "## Safety"] @@ -119369,7 +115119,6 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] #[doc = "## Safety"] @@ -119401,7 +115150,6 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] #[doc = "## Safety"] @@ -119428,7 +115176,6 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] #[doc = "## Safety"] @@ -119460,7 +115207,6 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] #[doc = "## Safety"] @@ -119487,7 +115233,6 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] #[doc = "## Safety"] @@ -119519,7 +115264,6 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] #[doc = "## Safety"] @@ -119546,7 +115290,6 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] #[doc = "## Safety"] @@ -119578,7 +115321,6 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] #[doc = "## Safety"] @@ -119605,7 +115347,6 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] #[doc = "## Safety"] @@ -119637,7 +115378,6 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] #[doc = "## Safety"] @@ -119664,7 +115404,6 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] #[doc = "## Safety"] @@ -119696,7 +115435,6 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] #[doc = "## Safety"] @@ -119723,7 +115461,6 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] #[doc = "## Safety"] @@ -119755,7 +115492,6 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] #[doc = "## Safety"] @@ -119790,7 +115526,6 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { ); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] #[doc = "## Safety"] @@ -119838,7 +115573,6 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { ); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] #[doc = "## Safety"] @@ -119865,7 +115599,6 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] #[doc = "## Safety"] @@ -119897,7 +115630,6 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] #[doc = "## Safety"] @@ -119924,7 +115656,6 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] #[doc = "## Safety"] @@ -119956,7 +115687,6 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] #[doc = "## Safety"] @@ -119991,7 +115721,6 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { ); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] #[doc = "## Safety"] @@ -120039,7 +115768,6 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { ); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] #[doc = "## Safety"] @@ -120066,7 +115794,6 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] #[doc = "## Safety"] @@ -120098,7 +115825,6 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] #[doc = "## Safety"] @@ -120125,7 +115851,6 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] #[doc = "## Safety"] @@ -120157,7 +115882,6 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] #[doc = "## Safety"] @@ -120192,7 +115916,6 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { ); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] #[doc = "## Safety"] @@ -120240,7 +115963,6 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { ); ret_val } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] #[doc = "## Safety"] @@ -120267,7 +115989,6 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } - #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] #[doc = "## Safety"] diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index 4b49d77889..623d39c398 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -11029,78 +11029,42 @@ mod tests { unsafe fn test_vceq_s8() { test_cmp_s8( |i, j| vceq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s8() { testq_cmp_s8( |i, j| vceqq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_s16() { test_cmp_s16( |i, j| vceq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s16() { testq_cmp_s16( |i, j| vceqq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_s32() { test_cmp_s32( |i, j| vceq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_s32() { testq_cmp_s32( |i, j| vceqq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11108,78 +11072,42 @@ mod tests { unsafe fn test_vceq_u8() { test_cmp_u8( |i, j| vceq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u8() { testq_cmp_u8( |i, j| vceqq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a == b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a == b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_u16() { test_cmp_u16( |i, j| vceq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u16() { testq_cmp_u16( |i, j| vceqq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a == b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a == b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceq_u32() { test_cmp_u32( |i, j| vceq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_u32() { testq_cmp_u32( |i, j| vceqq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11187,26 +11115,14 @@ mod tests { unsafe fn test_vceq_f32() { test_cmp_f32( |i, j| vcge_f32(i, j), - |a: f32, b: f32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vceqq_f32() { testq_cmp_f32( |i, j| vcgeq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a == b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a == b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11214,78 +11130,42 @@ mod tests { unsafe fn test_vcgt_s8() { test_cmp_s8( |i, j| vcgt_s8(i, j), - |a: i8, b: i8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s8() { testq_cmp_s8( |i, j| vcgtq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_s16() { test_cmp_s16( |i, j| vcgt_s16(i, j), - |a: i16, b: i16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s16() { testq_cmp_s16( |i, j| vcgtq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_s32() { test_cmp_s32( |i, j| vcgt_s32(i, j), - |a: i32, b: i32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_s32() { testq_cmp_s32( |i, j| vcgtq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11293,78 +11173,42 @@ mod tests { unsafe fn test_vcgt_u8() { test_cmp_u8( |i, j| vcgt_u8(i, j), - |a: u8, b: u8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u8() { testq_cmp_u8( |i, j| vcgtq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a > b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a > b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_u16() { test_cmp_u16( |i, j| vcgt_u16(i, j), - |a: u16, b: u16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u16() { testq_cmp_u16( |i, j| vcgtq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a > b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a > b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgt_u32() { test_cmp_u32( |i, j| vcgt_u32(i, j), - |a: u32, b: u32| -> u32 { - if a > b { - 0xFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a > b { 0xFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_u32() { testq_cmp_u32( |i, j| vcgtq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11372,26 +11216,14 @@ mod tests { unsafe fn test_vcgt_f32() { test_cmp_f32( |i, j| vcgt_f32(i, j), - |a: f32, b: f32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgtq_f32() { testq_cmp_f32( |i, j| vcgtq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a > b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a > b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11399,78 +11231,42 @@ mod tests { unsafe fn test_vclt_s8() { test_cmp_s8( |i, j| vclt_s8(i, j), - |a: i8, b: i8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s8() { testq_cmp_s8( |i, j| vcltq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_s16() { test_cmp_s16( |i, j| vclt_s16(i, j), - |a: i16, b: i16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a < b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s16() { testq_cmp_s16( |i, j| vcltq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a < b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_s32() { test_cmp_s32( |i, j| vclt_s32(i, j), - |a: i32, b: i32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_s32() { testq_cmp_s32( |i, j| vcltq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11478,78 +11274,42 @@ mod tests { unsafe fn test_vclt_u8() { test_cmp_u8( |i, j| vclt_u8(i, j), - |a: u8, b: u8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u8() { testq_cmp_u8( |i, j| vcltq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a < b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a < b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_u16() { test_cmp_u16( |i, j| vclt_u16(i, j), - |a: u16, b: u16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a < b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u16() { testq_cmp_u16( |i, j| vcltq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a < b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a < b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vclt_u32() { test_cmp_u32( |i, j| vclt_u32(i, j), - |a: u32, b: u32| -> u32 { - if a < b { - 0xFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a < b { 0xFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_u32() { testq_cmp_u32( |i, j| vcltq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11557,26 +11317,14 @@ mod tests { unsafe fn test_vclt_f32() { test_cmp_f32( |i, j| vclt_f32(i, j), - |a: f32, b: f32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcltq_f32() { testq_cmp_f32( |i, j| vcltq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a < b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a < b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11584,78 +11332,42 @@ mod tests { unsafe fn test_vcle_s8() { test_cmp_s8( |i, j| vcle_s8(i, j), - |a: i8, b: i8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s8() { testq_cmp_s8( |i, j| vcleq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_s16() { test_cmp_s16( |i, j| vcle_s16(i, j), - |a: i16, b: i16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s16() { testq_cmp_s16( |i, j| vcleq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_s32() { test_cmp_s32( |i, j| vcle_s32(i, j), - |a: i32, b: i32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_s32() { testq_cmp_s32( |i, j| vcleq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11663,78 +11375,42 @@ mod tests { unsafe fn test_vcle_u8() { test_cmp_u8( |i, j| vcle_u8(i, j), - |a: u8, b: u8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u8() { testq_cmp_u8( |i, j| vcleq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a <= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a <= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_u16() { test_cmp_u16( |i, j| vcle_u16(i, j), - |a: u16, b: u16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u16() { testq_cmp_u16( |i, j| vcleq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a <= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a <= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcle_u32() { test_cmp_u32( |i, j| vcle_u32(i, j), - |a: u32, b: u32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_u32() { testq_cmp_u32( |i, j| vcleq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11742,26 +11418,14 @@ mod tests { unsafe fn test_vcle_f32() { test_cmp_f32( |i, j| vcle_f32(i, j), - |a: f32, b: f32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcleq_f32() { testq_cmp_f32( |i, j| vcleq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a <= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a <= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11769,78 +11433,42 @@ mod tests { unsafe fn test_vcge_s8() { test_cmp_s8( |i, j| vcge_s8(i, j), - |a: i8, b: i8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s8() { testq_cmp_s8( |i, j| vcgeq_s8(i, j), - |a: i8, b: i8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: i8, b: i8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_s16() { test_cmp_s16( |i, j| vcge_s16(i, j), - |a: i16, b: i16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s16() { testq_cmp_s16( |i, j| vcgeq_s16(i, j), - |a: i16, b: i16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: i16, b: i16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_s32() { test_cmp_s32( |i, j| vcge_s32(i, j), - |a: i32, b: i32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_s32() { testq_cmp_s32( |i, j| vcgeq_s32(i, j), - |a: i32, b: i32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: i32, b: i32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11848,78 +11476,42 @@ mod tests { unsafe fn test_vcge_u8() { test_cmp_u8( |i, j| vcge_u8(i, j), - |a: u8, b: u8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u8() { testq_cmp_u8( |i, j| vcgeq_u8(i, j), - |a: u8, b: u8| -> u8 { - if a >= b { - 0xFF - } else { - 0 - } - }, + |a: u8, b: u8| -> u8 { if a >= b { 0xFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_u16() { test_cmp_u16( |i, j| vcge_u16(i, j), - |a: u16, b: u16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u16() { testq_cmp_u16( |i, j| vcgeq_u16(i, j), - |a: u16, b: u16| -> u16 { - if a >= b { - 0xFFFF - } else { - 0 - } - }, + |a: u16, b: u16| -> u16 { if a >= b { 0xFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcge_u32() { test_cmp_u32( |i, j| vcge_u32(i, j), - |a: u32, b: u32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_u32() { testq_cmp_u32( |i, j| vcgeq_u32(i, j), - |a: u32, b: u32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: u32, b: u32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } @@ -11927,26 +11519,14 @@ mod tests { unsafe fn test_vcge_f32() { test_cmp_f32( |i, j| vcge_f32(i, j), - |a: f32, b: f32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } #[simd_test(enable = "neon")] unsafe fn test_vcgeq_f32() { testq_cmp_f32( |i, j| vcgeq_f32(i, j), - |a: f32, b: f32| -> u32 { - if a >= b { - 0xFFFFFFFF - } else { - 0 - } - }, + |a: f32, b: f32| -> u32 { if a >= b { 0xFFFFFFFF } else { 0 } }, ); } diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs index ce7b169c5a..d9577db3c6 100644 --- a/crates/intrinsic-test/src/main.rs +++ b/crates/intrinsic-test/src/main.rs @@ -236,7 +236,9 @@ fn compile_c( let compiler_command = if target == "aarch64_be-unknown-linux-gnu" { let Some(cxx_toolchain_dir) = cxx_toolchain_dir else { - panic!("When setting `--target aarch64_be-unknown-linux-gnu` the C++ compilers toolchain directory must be set with `--cxx-toolchain-dir `"); + panic!( + "When setting `--target aarch64_be-unknown-linux-gnu` the C++ compilers toolchain directory must be set with `--cxx-toolchain-dir `" + ); }; /* clang++ cannot link an aarch64_be object file, so we invoke diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 99a3aed8d6..5d20bfc90c 100644 --- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -9902,7 +9902,7 @@ intrinsics: # Inlining seems broken for 'fn vld1_v1i64', this "fixes" it - Let: [a, '*const i8', 'ptr as *const i8'] - Let: [b, i32, 'crate::mem::align_of::() as i32'] - - 'extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; }} transmute(_vld1_v1i64(a, b))' + - 'unsafe extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; }} transmute(_vld1_v1i64(a, b))' - name: "vtbx1" visibility: private @@ -10681,7 +10681,7 @@ intrinsics: - Let: [a, i32, 'crc as i32'] - Let: [b, i32, '(data & 0xFFFFFFFF).as_signed() as i32'] - Let: [c, i32, '(data >> 32).as_signed() as i32'] - - 'extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] fn ___crc32w(crc: i32, data: i32) -> i32;}} ___crc32w(___crc32w(a, b), c).as_unsigned()' + - 'unsafe extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] fn ___crc32w(crc: i32, data: i32) -> i32;}} ___crc32w(___crc32w(a, b), c).as_unsigned()' - name: "__crc32cd" doc: "CRC32-C single round checksum for quad words (64 bits)." @@ -10700,7 +10700,7 @@ intrinsics: - Let: [a, i32, 'crc as i32'] - Let: [b, i32, '(data & 0xFFFFFFFF).as_signed() as i32'] - Let: [c, i32, '(data >> 32).as_signed() as i32'] - - 'extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] fn ___crc32cw(crc: i32, data: i32) -> i32;}} ___crc32cw(___crc32cw(a, b), c).as_unsigned() as u32' + - 'unsafe extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] fn ___crc32cw(crc: i32, data: i32) -> i32;}} ___crc32cw(___crc32cw(a, b), c).as_unsigned() as u32' - name: "vabs{neon_type.no}" doc: "Absolute value (wrapping)." diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index 5d17a7aed6..4d13c27685 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -1726,7 +1726,6 @@ fn create_tokens(intrinsic: &Intrinsic, endianness: Endianness, tokens: &mut Tok Endianness::Big => &intrinsic.big_endian_compose, }; - /* If we have manually defined attributes on the block of yaml with * 'attr:' we want to add them */ if let Some(attr) = &intrinsic.attr { From 48db139b659e26fb2aea021a84f9e3bc68559468 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 11 Feb 2025 18:07:20 +0000 Subject: [PATCH 11/13] update clang to clang-19 --- ci/docker/aarch64-unknown-linux-gnu/Dockerfile | 2 +- ci/docker/aarch64_be-none-linux-gnu/Dockerfile | 2 +- ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile | 4 ++-- ci/run.sh | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile index 7f09ca053a..a608d3954c 100644 --- a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile +++ b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile @@ -10,7 +10,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ qemu-user \ make \ file \ - clang-15 \ + clang-19 \ lld ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ diff --git a/ci/docker/aarch64_be-none-linux-gnu/Dockerfile b/ci/docker/aarch64_be-none-linux-gnu/Dockerfile index 91026f6a15..13f9342448 100644 --- a/ci/docker/aarch64_be-none-linux-gnu/Dockerfile +++ b/ci/docker/aarch64_be-none-linux-gnu/Dockerfile @@ -9,5 +9,5 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ qemu-user \ make \ file \ - clang-18 \ + clang-19 \ lld diff --git a/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile b/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile index be9959240b..401164c19b 100644 --- a/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile +++ b/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04 +FROM ubuntu:24.04 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ g++ \ @@ -10,7 +10,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ qemu-user \ make \ file \ - clang-15 \ + clang-19 \ lld ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER="qemu-arm -L /usr/arm-linux-gnueabihf" \ diff --git a/ci/run.sh b/ci/run.sh index 11cffd6de8..f8a75ed10e 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -141,13 +141,13 @@ case ${TARGET} in aarch64-unknown-linux-gnu*) TEST_CPPFLAGS="-fuse-ld=lld -I/usr/aarch64-linux-gnu/include/ -I/usr/aarch64-linux-gnu/include/c++/9/aarch64-linux-gnu/" TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64.txt - TEST_CXX_COMPILER="clang++-15" + TEST_CXX_COMPILER="clang++-19" TEST_RUNNER="${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" ;; armv7-unknown-linux-gnueabihf*) TEST_CPPFLAGS="-fuse-ld=lld -I/usr/arm-linux-gnueabihf/include/ -I/usr/arm-linux-gnueabihf/include/c++/9/arm-linux-gnueabihf/" TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_arm.txt - TEST_CXX_COMPILER="clang++-15" + TEST_CXX_COMPILER="clang++-19" TEST_RUNNER="${CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER}" ;; *) @@ -195,7 +195,7 @@ case "${TARGET}" in "./target/${TARGET}/release/intrinsic-test" \ "./intrinsics_data/arm_intrinsics.json" \ --target "${TARGET}" \ - --cppcompiler "clang++-18" \ + --cppcompiler "clang++-19" \ --skip crates/intrinsic-test/missing_aarch64.txt \ --runner "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" \ --linker "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER}" \ From 2b5126475a064a4e8a785023584f1a0aa4c7e6a2 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Mon, 17 Feb 2025 15:23:08 +0000 Subject: [PATCH 12/13] tidy up proposed Dockerfile & run.sh --- .../aarch64_be-none-linux-gnu/Dockerfile | 13 ----- .../aarch64_be-unknown-linux-gnu/Dockerfile | 29 ++++++++++++ ci/run.sh | 47 +++++++------------ 3 files changed, 45 insertions(+), 44 deletions(-) delete mode 100644 ci/docker/aarch64_be-none-linux-gnu/Dockerfile create mode 100644 ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile diff --git a/ci/docker/aarch64_be-none-linux-gnu/Dockerfile b/ci/docker/aarch64_be-none-linux-gnu/Dockerfile deleted file mode 100644 index 13f9342448..0000000000 --- a/ci/docker/aarch64_be-none-linux-gnu/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM ubuntu:24.04 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - gcc \ - g++ \ - ca-certificates \ - libc6-dev \ - libc6-dev-arm64-cross \ - qemu-user \ - make \ - file \ - clang-19 \ - lld diff --git a/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile b/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000..5562638646 --- /dev/null +++ b/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile @@ -0,0 +1,29 @@ +FROM ubuntu:24.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + g++ \ + ca-certificates \ + libc6-dev \ + libc6-dev-arm64-cross \ + qemu-user \ + make \ + file \ + clang-19 \ + curl \ + xz-utils \ + lld + +ENV TOOLCHAIN="arm-gnu-toolchain-14.2.rel1-x86_64-aarch64_be-none-linux-gnu" + +# Download the aarch64_be gcc toolchain +RUN curl -L "https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/${TOOLCHAIN}.tar.xz" -o "${TOOLCHAIN}.tar.xz" +RUN tar -xvf "${TOOLCHAIN}.tar.xz" +RUN mkdir /toolchains && mv "./${TOOLCHAIN}" /toolchains + +ENV AARCH64_BE_TOOLCHAIN="/toolchains/${TOOLCHAIN}" +ENV AARCH64_BE_LIBC="${AARCH64_BE_TOOLCHAIN}/aarch64_be-none-linux-gnu/libc" + +ENV CARGO_TARGET_AARCH64_BE_UNKNOWN_LINUX_GNU_LINKER="${AARCH64_BE_TOOLCHAIN}/bin/aarch64_be-none-linux-gnu-gcc" +ENV CARGO_TARGET_AARCH64_BE_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64_be -L ${AARCH64_BE_LIBC}" +ENV OBJDUMP="${AARCH64_BE_TOOLCHAIN}/bin/bin/aarch64-none-linux-gnu-objdump" diff --git a/ci/run.sh b/ci/run.sh index f8a75ed10e..73fd24a070 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -144,6 +144,14 @@ case ${TARGET} in TEST_CXX_COMPILER="clang++-19" TEST_RUNNER="${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" ;; + + aarch64_be-unknown-linux-gnu*) + TEST_CPPFLAGS="-fuse-ld=lld" + TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64.txt + TEST_CXX_COMPILER="clang++-19" + TEST_RUNNER="${CARGO_TARGET_AARCH64_BE_UNKNOWN_LINUX_GNU_RUNNER}" + ;; + armv7-unknown-linux-gnueabihf*) TEST_CPPFLAGS="-fuse-ld=lld -I/usr/arm-linux-gnueabihf/include/ -I/usr/arm-linux-gnueabihf/include/c++/9/arm-linux-gnueabihf/" TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_arm.txt @@ -167,41 +175,18 @@ case "${TARGET}" in --target "${TARGET}" ;; - aarch64_be-unknown-linux-gnu) - # get the aarch64_be toolchain - TOOLCHAIN="arm-gnu-toolchain-14.2.rel1-x86_64-aarch64_be-none-linux-gnu" - - # Download the aarch64_be gcc toolchain - curl -L "https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/${TOOLCHAIN}.tar.xz" \ - -o "${TOOLCHAIN}.tar.xz" && \ - tar -xzvf "./${TOOLCHAIN}".tar.xz && \ - mdkir /toolchains && - mv "./${TOOLCHAIN}" /toolchains - - # Build the test suite - AARCH64_BE_TOOLCHAIN="/toolchains/${TOOLCHAIN}" - AARCH64_BE_LIBC="${AARCH64_BE_TOOLCHAIN}/aarch64_be-none-linux-gnu/libc" - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="${AARCH64_BE_TOOLCHAIN}/bin/aarch64_be-none-linux-gnu-gcc" \ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64_be -L ${AARCH64_BE_LIBC}" \ - CPPFLAGS="-fuse-ld=lld" \ - RUSTFLAGS="-C linker=${AARCH64_BE}/bin/aarch64_be-none-linux-gnu-gcc -C link-args=-static" \ - cargo build \ - --target="${TARGET}" \ - --manifest-path=crates/intrinsic-test/Cargo.toml \ - --profile=release - - # Now run it - qemu-aarch64_be -L "${AARCH64_BE_LIBC}" \ - "./target/${TARGET}/release/intrinsic-test" \ - "./intrinsics_data/arm_intrinsics.json" \ + aarch64_be-unknown-linux-gnu*) + CPPFLAGS="${TEST_CPPFLAGS}" RUSTFLAGS="${HOST_RUSTFLAGS}" RUST_LOG=warn \ + cargo run "${INTRINSIC_TEST}" "${PROFILE}" \ + --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json \ + --runner "${TEST_RUNNER}" \ + --cppcompiler "${TEST_CXX_COMPILER}" \ + --skip "${TEST_SKIP_INTRINSICS}" \ --target "${TARGET}" \ - --cppcompiler "clang++-19" \ - --skip crates/intrinsic-test/missing_aarch64.txt \ - --runner "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" \ --linker "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER}" \ --cxx-toolchain-dir "${AARCH64_BE_TOOLCHAIN}" ;; - *) + *) ;; esac From fbe5fce819762a976a16152eda00ed0dae186196 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Tue, 18 Feb 2025 15:40:24 +0000 Subject: [PATCH 13/13] fix - only use simd_shuffle! on intrinsics that require it --- .../core_arch/src/aarch64/neon/generated.rs | 50078 ++----- .../src/arm_shared/neon/generated.rs | 113608 ++++----------- crates/stdarch-gen-arm/src/big_endian.rs | 42 +- crates/stdarch-gen-arm/src/intrinsic.rs | 19 +- 4 files changed, 44282 insertions(+), 119465 deletions(-) diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs index 268a774751..03c56ff0cb 100644 --- a/crates/core_arch/src/aarch64/neon/generated.rs +++ b/crates/core_arch/src/aarch64/neon/generated.rs @@ -55,7 +55,6 @@ pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] @@ -67,31 +66,10 @@ pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8 simd_add(a, simd_cast(f)) } #[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] -pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: int8x8_t = vabd_s8(d, e); - let f: uint8x8_t = simd_cast(f); - let ret_val: int16x8_t = simd_add(a, simd_cast(f)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] @@ -103,31 +81,10 @@ pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x simd_add(a, simd_cast(f)) } #[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] -pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - let f: int16x4_t = vabd_s16(d, e); - let f: uint16x4_t = simd_cast(f); - let ret_val: int32x4_t = simd_add(a, simd_cast(f)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] @@ -138,32 +95,11 @@ pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x let f: uint32x2_t = simd_cast(f); simd_add(a, simd_cast(f)) } -#[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] -pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let d: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let e: int32x2_t = simd_shuffle!(c, c, [2, 3]); - let f: int32x2_t = vabd_s32(d, e); - let f: uint32x2_t = simd_cast(f); - let ret_val: int64x2_t = simd_add(a, simd_cast(f)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] @@ -174,30 +110,10 @@ pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint simd_add(a, simd_cast(f)) } #[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] -pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: uint8x8_t = vabd_u8(d, e); - let ret_val: uint16x8_t = simd_add(a, simd_cast(f)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] @@ -208,30 +124,10 @@ pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin simd_add(a, simd_cast(f)) } #[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] -pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - let f: uint16x4_t = vabd_u16(d, e); - let ret_val: uint32x4_t = simd_add(a, simd_cast(f)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] @@ -241,25 +137,6 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin let f: uint32x2_t = vabd_u32(d, e); simd_add(a, simd_cast(f)) } -#[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] -pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]); - let f: uint32x2_t = vabd_u32(d, e); - let ret_val: uint64x2_t = simd_add(a, simd_cast(f)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"] #[doc = "## Safety"] @@ -283,7 +160,6 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] @@ -297,28 +173,6 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vabdq_f64(a, b) } -#[doc = "Absolute difference between the arguments of Floating"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fabd))] -pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fabd.v2f64" - )] - fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vabdq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"] #[doc = "## Safety"] @@ -346,7 +200,6 @@ pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl))] @@ -357,29 +210,10 @@ pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { simd_cast(e) } #[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sabdl))] -pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let e: uint16x4_t = simd_cast(vabd_s16(c, d)); - let ret_val: int32x4_t = simd_cast(e); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl))] @@ -390,29 +224,10 @@ pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { simd_cast(e) } #[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sabdl))] -pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let e: uint32x2_t = simd_cast(vabd_s32(c, d)); - let ret_val: int64x2_t = simd_cast(e); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl))] @@ -422,30 +237,11 @@ pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let e: uint8x8_t = simd_cast(vabd_s8(c, d)); simd_cast(e) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sabdl))] -pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let e: uint8x8_t = simd_cast(vabd_s8(c, d)); - let ret_val: int16x8_t = simd_cast(e); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -455,28 +251,10 @@ pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { simd_cast(vabd_u8(c, d)) } #[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uabdl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = simd_cast(vabd_u8(c, d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -486,28 +264,10 @@ pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { simd_cast(vabd_u16(c, d)) } #[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uabdl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let ret_val: uint32x4_t = simd_cast(vabd_u16(c, d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -516,23 +276,6 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); simd_cast(vabd_u32(c, d)) } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uabdl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let ret_val: uint64x2_t = simd_cast(vabd_u32(c, d)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"] #[doc = "## Safety"] @@ -549,27 +292,12 @@ pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fabs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t { simd_fabs(a) } -#[doc = "Floating-point absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fabs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_fabs(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"] #[doc = "## Safety"] @@ -611,7 +339,6 @@ pub unsafe fn vabsd_s64(a: i64) -> i64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] @@ -625,27 +352,6 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { } _vabsq_s64(a) } -#[doc = "Absolute Value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(abs))] -pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v2i64" - )] - fn _vabsq_s64(a: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vabsq_s64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"] #[doc = "## Safety"] @@ -673,26 +379,6 @@ pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(saddlv))] -pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlv.i32.v4i16" - )] - fn _vaddlv_s16(a: int16x4_t) -> i32; - } - _vaddlv_s16(a) -} -#[doc = "Signed Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -704,7 +390,6 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { )] fn _vaddlv_s16(a: int16x4_t) -> i32; } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlv_s16(a) } #[doc = "Signed Add Long across Vector"] @@ -712,26 +397,6 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(saddlv))] -pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlv.i32.v8i16" - )] - fn _vaddlvq_s16(a: int16x8_t) -> i32; - } - _vaddlvq_s16(a) -} -#[doc = "Signed Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -743,7 +408,6 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { )] fn _vaddlvq_s16(a: int16x8_t) -> i32; } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlvq_s16(a) } #[doc = "Signed Add Long across Vector"] @@ -751,26 +415,6 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(saddlv))] -pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlv.i64.v4i32" - )] - fn _vaddlvq_s32(a: int32x4_t) -> i64; - } - _vaddlvq_s32(a) -} -#[doc = "Signed Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -782,7 +426,6 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { )] fn _vaddlvq_s32(a: int32x4_t) -> i64; } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlvq_s32(a) } #[doc = "Signed Add Long across Vector"] @@ -790,26 +433,6 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(saddlp))] -pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlv.i64.v2i32" - )] - fn _vaddlv_s32(a: int32x2_t) -> i64; - } - _vaddlv_s32(a) -} -#[doc = "Signed Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlp))] @@ -821,7 +444,6 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { )] fn _vaddlv_s32(a: int32x2_t) -> i64; } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddlv_s32(a) } #[doc = "Signed Add Long across Vector"] @@ -829,26 +451,6 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(saddlv))] -pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlv.i32.v8i8" - )] - fn _vaddlv_s8(a: int8x8_t) -> i32; - } - _vaddlv_s8(a) as i16 -} -#[doc = "Signed Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -860,7 +462,6 @@ pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { )] fn _vaddlv_s8(a: int8x8_t) -> i32; } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlv_s8(a) as i16 } #[doc = "Signed Add Long across Vector"] @@ -868,26 +469,6 @@ pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(saddlv))] -pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlv.i32.v16i8" - )] - fn _vaddlvq_s8(a: int8x16_t) -> i32; - } - _vaddlvq_s8(a) as i16 -} -#[doc = "Signed Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -899,7 +480,6 @@ pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { )] fn _vaddlvq_s8(a: int8x16_t) -> i32; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vaddlvq_s8(a) as i16 } #[doc = "Unsigned Add Long across Vector"] @@ -907,26 +487,6 @@ pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" - )] - fn _vaddlv_u16(a: int16x4_t) -> i32; - } - _vaddlv_u16(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -938,7 +498,6 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { )] fn _vaddlv_u16(a: int16x4_t) -> i32; } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlv_u16(a.as_signed()).as_unsigned() } #[doc = "Unsigned Add Long across Vector"] @@ -946,26 +505,6 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" - )] - fn _vaddlvq_u16(a: int16x8_t) -> i32; - } - _vaddlvq_u16(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -977,7 +516,6 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { )] fn _vaddlvq_u16(a: int16x8_t) -> i32; } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlvq_u16(a.as_signed()).as_unsigned() } #[doc = "Unsigned Add Long across Vector"] @@ -985,26 +523,6 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" - )] - fn _vaddlvq_u32(a: int32x4_t) -> i64; - } - _vaddlvq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -1016,7 +534,6 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { )] fn _vaddlvq_u32(a: int32x4_t) -> i64; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddlvq_u32(a.as_signed()).as_unsigned() } #[doc = "Unsigned Add Long across Vector"] @@ -1024,26 +541,6 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlp))] -pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" - )] - fn _vaddlv_u32(a: int32x2_t) -> i64; - } - _vaddlv_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlp))] @@ -1055,7 +552,6 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { )] fn _vaddlv_u32(a: int32x2_t) -> i64; } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddlv_u32(a.as_signed()).as_unsigned() } #[doc = "Unsigned Add Long across Vector"] @@ -1063,26 +559,6 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8" - )] - fn _vaddlv_u8(a: int8x8_t) -> i32; - } - _vaddlv_u8(a.as_signed()).as_unsigned() as u16 -} -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -1094,7 +570,6 @@ pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { )] fn _vaddlv_u8(a: int8x8_t) -> i32; } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddlv_u8(a.as_signed()).as_unsigned() as u16 } #[doc = "Unsigned Add Long across Vector"] @@ -1102,7 +577,6 @@ pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -1116,133 +590,51 @@ pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { } _vaddlvq_u8(a.as_signed()).as_unsigned() as u16 } -#[doc = "Unsigned Add Long across Vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"] +#[doc = "Floating-point add across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uaddlv))] -pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8" + link_name = "llvm.aarch64.neon.faddv.f32.v2f32" )] - fn _vaddlvq_u8(a: int8x16_t) -> i32; + fn _vaddv_f32(a: float32x2_t) -> f32; } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - _vaddlvq_u8(a.as_signed()).as_unsigned() as u16 + _vaddv_f32(a) } #[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { +pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f32.v2f32" + link_name = "llvm.aarch64.neon.faddv.f32.v4f32" )] - fn _vaddv_f32(a: float32x2_t) -> f32; + fn _vaddvq_f32(a: float32x4_t) -> f32; } - _vaddv_f32(a) + _vaddvq_f32(a) } #[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f32.v2f32" - )] - fn _vaddv_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vaddv_f32(a) -} -#[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f32.v4f32" - )] - fn _vaddvq_f32(a: float32x4_t) -> f32; - } - _vaddvq_f32(a) -} -#[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f32.v4f32" - )] - fn _vaddvq_f32(a: float32x4_t) -> f32; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vaddvq_f32(a) -} -#[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddv.f64.v2f64" - )] - fn _vaddvq_f64(a: float64x2_t) -> f64; - } - _vaddvq_f64(a) -} -#[doc = "Floating-point add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { +pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -1250,7 +642,6 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { )] fn _vaddvq_f64(a: float64x2_t) -> f64; } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); _vaddvq_f64(a) } #[doc = "Add across vector"] @@ -1258,26 +649,6 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v2i32" - )] - fn _vaddv_s32(a: int32x2_t) -> i32; - } - _vaddv_s32(a) -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -1289,7 +660,6 @@ pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { )] fn _vaddv_s32(a: int32x2_t) -> i32; } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddv_s32(a) } #[doc = "Add across vector"] @@ -1297,26 +667,6 @@ pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v8i8" - )] - fn _vaddv_s8(a: int8x8_t) -> i8; - } - _vaddv_s8(a) -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1328,7 +678,6 @@ pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { )] fn _vaddv_s8(a: int8x8_t) -> i8; } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddv_s8(a) } #[doc = "Add across vector"] @@ -1336,26 +685,6 @@ pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v16i8" - )] - fn _vaddvq_s8(a: int8x16_t) -> i8; - } - _vaddvq_s8(a) -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1367,7 +696,6 @@ pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { )] fn _vaddvq_s8(a: int8x16_t) -> i8; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vaddvq_s8(a) } #[doc = "Add across vector"] @@ -1375,26 +703,6 @@ pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v4i16" - )] - fn _vaddv_s16(a: int16x4_t) -> i16; - } - _vaddv_s16(a) -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1406,7 +714,6 @@ pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { )] fn _vaddv_s16(a: int16x4_t) -> i16; } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddv_s16(a) } #[doc = "Add across vector"] @@ -1414,26 +721,6 @@ pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v8i16" - )] - fn _vaddvq_s16(a: int16x8_t) -> i16; - } - _vaddvq_s16(a) -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1445,7 +732,6 @@ pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { )] fn _vaddvq_s16(a: int16x8_t) -> i16; } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddvq_s16(a) } #[doc = "Add across vector"] @@ -1453,26 +739,6 @@ pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v4i32" - )] - fn _vaddvq_s32(a: int32x4_t) -> i32; - } - _vaddvq_s32(a) -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1484,7 +750,6 @@ pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { )] fn _vaddvq_s32(a: int32x4_t) -> i32; } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddvq_s32(a) } #[doc = "Add across vector"] @@ -1492,26 +757,6 @@ pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v2i32" - )] - fn _vaddv_u32(a: int32x2_t) -> i32; - } - _vaddv_u32(a.as_signed()).as_unsigned() -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -1523,7 +768,6 @@ pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { )] fn _vaddv_u32(a: int32x2_t) -> i32; } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); _vaddv_u32(a.as_signed()).as_unsigned() } #[doc = "Add across vector"] @@ -1531,26 +775,6 @@ pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v8i8" - )] - fn _vaddv_u8(a: int8x8_t) -> i8; - } - _vaddv_u8(a.as_signed()).as_unsigned() -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1562,7 +786,6 @@ pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { )] fn _vaddv_u8(a: int8x8_t) -> i8; } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddv_u8(a.as_signed()).as_unsigned() } #[doc = "Add across vector"] @@ -1570,26 +793,6 @@ pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v16i8" - )] - fn _vaddvq_u8(a: int8x16_t) -> i8; - } - _vaddvq_u8(a.as_signed()).as_unsigned() -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1601,7 +804,6 @@ pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { )] fn _vaddvq_u8(a: int8x16_t) -> i8; } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); _vaddvq_u8(a.as_signed()).as_unsigned() } #[doc = "Add across vector"] @@ -1609,26 +811,6 @@ pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v4i16" - )] - fn _vaddv_u16(a: int16x4_t) -> i16; - } - _vaddv_u16(a.as_signed()).as_unsigned() -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1640,7 +822,6 @@ pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { )] fn _vaddv_u16(a: int16x4_t) -> i16; } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddv_u16(a.as_signed()).as_unsigned() } #[doc = "Add across vector"] @@ -1648,26 +829,6 @@ pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v8i16" - )] - fn _vaddvq_u16(a: int16x8_t) -> i16; - } - _vaddvq_u16(a.as_signed()).as_unsigned() -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1679,7 +840,6 @@ pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { )] fn _vaddvq_u16(a: int16x8_t) -> i16; } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); _vaddvq_u16(a.as_signed()).as_unsigned() } #[doc = "Add across vector"] @@ -1687,26 +847,6 @@ pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addv))] -pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v4i32" - )] - fn _vaddvq_u32(a: int32x4_t) -> i32; - } - _vaddvq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -1718,7 +858,6 @@ pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { )] fn _vaddvq_u32(a: int32x4_t) -> i32; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); _vaddvq_u32(a.as_signed()).as_unsigned() } #[doc = "Add across vector"] @@ -1726,26 +865,6 @@ pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i64.v2i64" - )] - fn _vaddvq_s64(a: int64x2_t) -> i64; - } - _vaddvq_s64(a) -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -1757,7 +876,6 @@ pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { )] fn _vaddvq_s64(a: int64x2_t) -> i64; } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); _vaddvq_s64(a) } #[doc = "Add across vector"] @@ -1765,26 +883,6 @@ pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i64.v2i64" - )] - fn _vaddvq_u64(a: int64x2_t) -> i64; - } - _vaddvq_u64(a.as_signed()).as_unsigned() -} -#[doc = "Add across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -1796,7 +894,6 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { )] fn _vaddvq_u64(a: int64x2_t) -> i64; } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); _vaddvq_u64(a.as_signed()).as_unsigned() } #[doc = "Bit clear and exclusive OR"] @@ -1804,7 +901,6 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -1819,38 +915,10 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { _vbcaxq_s8(a, b, c) } #[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v16i8" - )] - fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vbcaxq_s8(a, b, c); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -1865,34 +933,10 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t _vbcaxq_s16(a, b, c) } #[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v8i16" - )] - fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vbcaxq_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -1907,34 +951,10 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t _vbcaxq_s32(a, b, c) } #[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v4i32" - )] - fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vbcaxq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -1949,34 +969,10 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t _vbcaxq_s64(a, b, c) } #[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxs.v2i64" - )] - fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = _vbcaxq_s64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -1991,38 +987,10 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 _vbcaxq_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } #[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v16i8" - )] - fn _vbcaxq_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vbcaxq_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -2037,35 +1005,10 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } #[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v8i16" - )] - fn _vbcaxq_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = - _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -2080,78 +1023,28 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } #[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { +pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v4i32" + link_name = "llvm.aarch64.crypto.bcaxu.v2i64" )] - fn _vbcaxq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vbcaxq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v2i64" - )] - fn _vbcaxq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() -} -#[doc = "Bit clear and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(bcax))] -pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.bcaxu.v2i64" - )] - fn _vbcaxq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = - _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -2166,33 +1059,10 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { _vcadd_rot270_f32(a, b) } #[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32" - )] - fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vcadd_rot270_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -2207,33 +1077,10 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { _vcaddq_rot270_f32(a, b) } #[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" - )] - fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcaddq_rot270_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -2248,33 +1095,10 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { _vcaddq_rot270_f64(a, b) } #[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" - )] - fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vcaddq_rot270_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -2289,33 +1113,10 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { _vcadd_rot90_f32(a, b) } #[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" - )] - fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vcadd_rot90_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -2330,33 +1131,10 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { _vcaddq_rot90_f32(a, b) } #[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" - )] - fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcaddq_rot90_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -2370,28 +1148,6 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } _vcaddq_rot90_f64(a, b) } -#[doc = "Floating-point complex add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcadd))] -pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" - )] - fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vcaddq_rot90_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"] #[doc = "## Safety"] @@ -2415,7 +1171,6 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2430,28 +1185,6 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { _vcageq_f64(a, b).as_unsigned() } #[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" - )] - fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vcageq_f64(a, b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -2510,7 +1243,6 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2525,28 +1257,6 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { _vcagtq_f64(a, b).as_unsigned() } #[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" - )] - fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vcagtq_f64(a, b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -2598,7 +1308,6 @@ pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2606,21 +1315,6 @@ pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { vcageq_f64(b, a) } #[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = vcageq_f64(b, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -2658,7 +1352,6 @@ pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2666,21 +1359,6 @@ pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { vcagtq_f64(b, a) } #[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(facgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = vcagtq_f64(b, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -2718,28 +1396,12 @@ pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_eq(a, b) } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"] #[doc = "## Safety"] @@ -2756,7 +1418,6 @@ pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2764,21 +1425,6 @@ pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_eq(a, b) } #[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -2794,7 +1440,6 @@ pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2802,21 +1447,6 @@ pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_eq(a, b) } #[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -2832,28 +1462,12 @@ pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { simd_eq(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"] #[doc = "## Safety"] @@ -2903,7 +1517,6 @@ pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2912,26 +1525,10 @@ pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t { simd_eq(a, transmute(b)) } #[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f32x2 = f32x2::new(0.0, 0.0); - let ret_val: uint32x2_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2940,21 +1537,6 @@ pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { simd_eq(a, transmute(b)) } #[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - let ret_val: uint32x4_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -2971,7 +1553,6 @@ pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2979,27 +1560,11 @@ pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_eq(a, transmute(b)) } -#[doc = "Floating-point compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f64x2 = f64x2::new(0.0, 0.0); - let ret_val: uint64x2_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3008,26 +1573,10 @@ pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3036,30 +1585,10 @@ pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_eq(a, transmute(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3068,26 +1597,10 @@ pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i16x4 = i16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3096,26 +1609,10 @@ pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3124,26 +1621,10 @@ pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i32x2 = i32x2::new(0, 0); - let ret_val: uint32x2_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3152,21 +1633,6 @@ pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i32x4 = i32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -3183,7 +1649,6 @@ pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3192,26 +1657,10 @@ pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3220,26 +1669,10 @@ pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3248,25 +1681,6 @@ pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { simd_eq(a, transmute(b)) } #[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_eq(a, transmute(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -3283,7 +1697,6 @@ pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3291,27 +1704,11 @@ pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_eq(a, transmute(b)) } -#[doc = "Signed compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3320,204 +1717,89 @@ pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { simd_eq(a, transmute(b)) } #[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { + let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } #[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { - let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { + let b: u16x4 = u16x4::new(0, 0, 0, 0); simd_eq(a, transmute(b)) } #[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_eq(a, transmute(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { + let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_eq(a, transmute(b)) } #[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { - let b: u16x4 = u16x4::new(0, 0, 0, 0); +pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { + let b: u32x2 = u32x2::new(0, 0); simd_eq(a, transmute(b)) } #[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: u16x4 = u16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { + let b: u32x4 = u32x4::new(0, 0, 0, 0); + simd_eq(a, transmute(b)) } #[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { - let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); +pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t { + let b: u64x1 = u64x1::new(0); simd_eq(a, transmute(b)) } #[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { - let b: u32x2 = u32x2::new(0, 0); - simd_eq(a, transmute(b)) -} -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: u32x2 = u32x2::new(0, 0); - let ret_val: uint32x2_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { - let b: u32x4 = u32x4::new(0, 0, 0, 0); - simd_eq(a, transmute(b)) -} -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: u32x4 = u32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t { - let b: u64x1 = u64x1::new(0); - simd_eq(a, transmute(b)) -} -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { let b: u64x2 = u64x2::new(0, 0); simd_eq(a, transmute(b)) } -#[doc = "Unsigned compare bitwise equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmeq))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: u64x2 = u64x2::new(0, 0); - let ret_val: uint64x2_t = simd_eq(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"] #[doc = "## Safety"] @@ -3578,28 +1860,12 @@ pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_ge(a, b) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"] #[doc = "## Safety"] @@ -3616,28 +1882,12 @@ pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_ge(a, b) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"] #[doc = "## Safety"] @@ -3654,28 +1904,12 @@ pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_ge(a, b) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"] #[doc = "## Safety"] @@ -3725,7 +1959,6 @@ pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3734,26 +1967,10 @@ pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t { simd_ge(a, transmute(b)) } #[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f32x2 = f32x2::new(0.0, 0.0); - let ret_val: uint32x2_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3762,21 +1979,6 @@ pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { simd_ge(a, transmute(b)) } #[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - let ret_val: uint32x4_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -3793,7 +1995,6 @@ pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3801,27 +2002,11 @@ pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_ge(a, transmute(b)) } -#[doc = "Floating-point compare greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f64x2 = f64x2::new(0.0, 0.0); - let ret_val: uint64x2_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3830,26 +2015,10 @@ pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t { simd_ge(a, transmute(b)) } #[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3858,30 +2027,10 @@ pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { simd_ge(a, transmute(b)) } #[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_ge(a, transmute(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3890,26 +2039,10 @@ pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t { simd_ge(a, transmute(b)) } #[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i16x4 = i16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3918,26 +2051,10 @@ pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { simd_ge(a, transmute(b)) } #[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3946,26 +2063,10 @@ pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t { simd_ge(a, transmute(b)) } #[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i32x2 = i32x2::new(0, 0); - let ret_val: uint32x2_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -3974,21 +2075,6 @@ pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { simd_ge(a, transmute(b)) } #[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i32x4 = i32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -4005,7 +2091,6 @@ pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4013,21 +2098,6 @@ pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_ge(a, transmute(b)) } -#[doc = "Compare signed greater than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_ge(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"] #[doc = "## Safety"] @@ -4077,28 +2147,12 @@ pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_gt(a, b) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"] #[doc = "## Safety"] @@ -4115,28 +2169,12 @@ pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_gt(a, b) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"] #[doc = "## Safety"] @@ -4153,28 +2191,12 @@ pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_gt(a, b) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhi))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"] #[doc = "## Safety"] @@ -4224,7 +2246,6 @@ pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4233,26 +2254,10 @@ pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { simd_gt(a, transmute(b)) } #[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f32x2 = f32x2::new(0.0, 0.0); - let ret_val: uint32x2_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4261,21 +2266,6 @@ pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { simd_gt(a, transmute(b)) } #[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - let ret_val: uint32x4_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -4292,7 +2282,6 @@ pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4300,27 +2289,11 @@ pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_gt(a, transmute(b)) } -#[doc = "Floating-point compare greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f64x2 = f64x2::new(0.0, 0.0); - let ret_val: uint64x2_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4329,26 +2302,10 @@ pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { simd_gt(a, transmute(b)) } #[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4357,30 +2314,10 @@ pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { simd_gt(a, transmute(b)) } #[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_gt(a, transmute(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4389,26 +2326,10 @@ pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { simd_gt(a, transmute(b)) } #[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i16x4 = i16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4417,26 +2338,10 @@ pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { simd_gt(a, transmute(b)) } #[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4445,26 +2350,10 @@ pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { simd_gt(a, transmute(b)) } #[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i32x2 = i32x2::new(0, 0); - let ret_val: uint32x2_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4473,21 +2362,6 @@ pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { simd_gt(a, transmute(b)) } #[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i32x4 = i32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -4504,7 +2378,6 @@ pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4512,21 +2385,6 @@ pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_gt(a, transmute(b)) } -#[doc = "Compare signed greater than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_gt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"] #[doc = "## Safety"] @@ -4576,28 +2434,12 @@ pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_le(a, b) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"] #[doc = "## Safety"] @@ -4614,28 +2456,12 @@ pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_le(a, b) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmge))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"] #[doc = "## Safety"] @@ -4652,28 +2478,12 @@ pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_le(a, b) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"] #[doc = "## Safety"] @@ -4723,7 +2533,6 @@ pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4732,26 +2541,10 @@ pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t { simd_le(a, transmute(b)) } #[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f32x2 = f32x2::new(0.0, 0.0); - let ret_val: uint32x2_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4760,21 +2553,6 @@ pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t { simd_le(a, transmute(b)) } #[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - let ret_val: uint32x4_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -4791,7 +2569,6 @@ pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4799,27 +2576,11 @@ pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_le(a, transmute(b)) } -#[doc = "Floating-point compare less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f64x2 = f64x2::new(0.0, 0.0); - let ret_val: uint64x2_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4828,26 +2589,10 @@ pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t { simd_le(a, transmute(b)) } #[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4856,30 +2601,10 @@ pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t { simd_le(a, transmute(b)) } #[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_le(a, transmute(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4888,26 +2613,10 @@ pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t { simd_le(a, transmute(b)) } #[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i16x4 = i16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4916,26 +2625,10 @@ pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t { simd_le(a, transmute(b)) } #[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4944,26 +2637,10 @@ pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t { simd_le(a, transmute(b)) } #[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i32x2 = i32x2::new(0, 0); - let ret_val: uint32x2_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -4972,21 +2649,6 @@ pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t { simd_le(a, transmute(b)) } #[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i32x4 = i32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -5003,7 +2665,6 @@ pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5011,21 +2672,6 @@ pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_le(a, transmute(b)) } -#[doc = "Compare signed less than or equal to zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmle))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_le(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"] #[doc = "## Safety"] @@ -5075,28 +2721,12 @@ pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { simd_lt(a, b) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"] #[doc = "## Safety"] @@ -5113,28 +2743,12 @@ pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_lt(a, b) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmgt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"] #[doc = "## Safety"] @@ -5151,28 +2765,12 @@ pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_lt(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmhi))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"] #[doc = "## Safety"] @@ -5222,7 +2820,6 @@ pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5231,26 +2828,10 @@ pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { simd_lt(a, transmute(b)) } #[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f32x2 = f32x2::new(0.0, 0.0); - let ret_val: uint32x2_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5259,21 +2840,6 @@ pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { simd_lt(a, transmute(b)) } #[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0); - let ret_val: uint32x4_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -5290,7 +2856,6 @@ pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5298,27 +2863,11 @@ pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { let b: f64x2 = f64x2::new(0.0, 0.0); simd_lt(a, transmute(b)) } -#[doc = "Floating-point compare less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: f64x2 = f64x2::new(0.0, 0.0); - let ret_val: uint64x2_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5327,26 +2876,10 @@ pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t { simd_lt(a, transmute(b)) } #[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5355,30 +2888,10 @@ pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { simd_lt(a, transmute(b)) } #[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_lt(a, transmute(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5387,26 +2900,10 @@ pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { simd_lt(a, transmute(b)) } #[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i16x4 = i16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5415,26 +2912,10 @@ pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { simd_lt(a, transmute(b)) } #[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5443,26 +2924,10 @@ pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { simd_lt(a, transmute(b)) } #[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i32x2 = i32x2::new(0, 0); - let ret_val: uint32x2_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5471,21 +2936,6 @@ pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { simd_lt(a, transmute(b)) } #[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: i32x4 = i32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] @@ -5502,7 +2952,6 @@ pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -5510,21 +2959,6 @@ pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { let b: i64x2 = i64x2::new(0, 0); simd_lt(a, transmute(b)) } -#[doc = "Compare signed less than zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmlt))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_lt(a, transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"] #[doc = "## Safety"] @@ -5563,7 +2997,6 @@ pub unsafe fn vcltzd_s64(a: i64) -> u64 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -5578,34 +3011,10 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float _vcmla_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" - )] - fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = _vcmla_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -5620,34 +3029,10 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa _vcmlaq_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" - )] - fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcmlaq_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -5662,34 +3047,10 @@ pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa _vcmlaq_f64(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" - )] - fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = _vcmlaq_f64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -5704,34 +3065,10 @@ pub unsafe fn vcmla_lane_f32( vcmla_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -5755,43 +3092,10 @@ pub unsafe fn vcmlaq_lane_f32( vcmlaq_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -5806,34 +3110,10 @@ pub unsafe fn vcmla_laneq_f32( vcmla_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -5857,43 +3137,10 @@ pub unsafe fn vcmlaq_laneq_f32( vcmlaq_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -5908,34 +3155,10 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - _vcmla_rot180_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" - )] - fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = _vcmla_rot180_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -5950,34 +3173,10 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) _vcmlaq_rot180_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" - )] - fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcmlaq_rot180_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -5992,34 +3191,10 @@ pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) _vcmlaq_rot180_f64(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" - )] - fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = _vcmlaq_rot180_f64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6034,34 +3209,10 @@ pub unsafe fn vcmla_rot180_lane_f32( vcmla_rot180_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot180_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_rot180_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6085,43 +3236,10 @@ pub unsafe fn vcmlaq_rot180_lane_f32( vcmlaq_rot180_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot180_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_rot180_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6136,34 +3254,10 @@ pub unsafe fn vcmla_rot180_laneq_f32( vcmla_rot180_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot180_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_rot180_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6187,43 +3281,10 @@ pub unsafe fn vcmlaq_rot180_laneq_f32( vcmlaq_rot180_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot180_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_rot180_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -6238,34 +3299,10 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - _vcmla_rot270_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" - )] - fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = _vcmla_rot270_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -6280,34 +3317,10 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) _vcmlaq_rot270_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" - )] - fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcmlaq_rot270_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -6322,34 +3335,10 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) _vcmlaq_rot270_f64(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" - )] - fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = _vcmlaq_rot270_f64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6364,34 +3353,10 @@ pub unsafe fn vcmla_rot270_lane_f32( vcmla_rot270_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot270_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_rot270_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6415,43 +3380,10 @@ pub unsafe fn vcmlaq_rot270_lane_f32( vcmlaq_rot270_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot270_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_rot270_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6466,34 +3398,10 @@ pub unsafe fn vcmla_rot270_laneq_f32( vcmla_rot270_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot270_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_rot270_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6517,43 +3425,10 @@ pub unsafe fn vcmlaq_rot270_laneq_f32( vcmlaq_rot270_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot270_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_rot270_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -6568,34 +3443,10 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> _vcmla_rot90_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32" - )] - fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = _vcmla_rot90_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -6610,34 +3461,10 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) - _vcmlaq_rot90_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32" - )] - fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcmlaq_rot90_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -6652,34 +3479,10 @@ pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) - _vcmlaq_rot90_f64(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -#[cfg_attr(test, assert_instr(fcmla))] -pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64" - )] - fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = _vcmlaq_rot90_f64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6694,34 +3497,10 @@ pub unsafe fn vcmla_rot90_lane_f32( vcmla_rot90_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot90_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert!(LANE == 0); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_rot90_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6745,43 +3524,10 @@ pub unsafe fn vcmlaq_rot90_lane_f32( vcmlaq_rot90_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot90_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert!(LANE == 0); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_rot90_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6796,34 +3542,10 @@ pub unsafe fn vcmla_rot90_laneq_f32( vcmla_rot90_f32(a, b, c) } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmla_rot90_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); - let ret_val: float32x2_t = vcmla_rot90_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -6846,44 +3568,11 @@ pub unsafe fn vcmlaq_rot90_laneq_f32( ); vcmlaq_rot90_f32(a, b, c) } -#[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,fcma")] -#[cfg_attr(test, assert_instr(fcmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] -pub unsafe fn vcmlaq_rot90_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!( - c, - c, - [ - 2 * LANE as u32, - 2 * LANE as u32 + 1, - 2 * LANE as u32, - 2 * LANE as u32 + 1 - ] - ); - let ret_val: float32x4_t = vcmlaq_rot90_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -6901,36 +3590,10 @@ pub unsafe fn vcopy_lane_f32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_f32( - a: float32x2_t, - b: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -6954,42 +3617,10 @@ pub unsafe fn vcopy_lane_s8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_s8( - a: int8x8_t, - b: int8x8_t, -) -> int8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7009,38 +3640,10 @@ pub unsafe fn vcopy_lane_s16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_s16( - a: int16x4_t, - b: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7058,36 +3661,10 @@ pub unsafe fn vcopy_lane_s32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_s32( - a: int32x2_t, - b: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7111,42 +3688,10 @@ pub unsafe fn vcopy_lane_u8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_u8( - a: uint8x8_t, - b: uint8x8_t, -) -> uint8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7166,38 +3711,10 @@ pub unsafe fn vcopy_lane_u16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_u16( - a: uint16x4_t, - b: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7215,36 +3732,10 @@ pub unsafe fn vcopy_lane_u32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_u32( - a: uint32x2_t, - b: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7268,42 +3759,10 @@ pub unsafe fn vcopy_lane_p8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_p8( - a: poly8x8_t, - b: poly8x8_t, -) -> poly8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7323,38 +3782,10 @@ pub unsafe fn vcopy_lane_p16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_lane_p16( - a: poly16x4_t, - b: poly16x4_t, -) -> poly16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7373,37 +3804,10 @@ pub unsafe fn vcopy_laneq_f32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_f32( - a: float32x2_t, - b: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 2); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7428,43 +3832,10 @@ pub unsafe fn vcopy_laneq_s8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_s8( - a: int8x8_t, - b: int8x16_t, -) -> int8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 4); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7485,39 +3856,10 @@ pub unsafe fn vcopy_laneq_s16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_s16( - a: int16x4_t, - b: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7536,37 +3878,10 @@ pub unsafe fn vcopy_laneq_s32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_s32( - a: int32x2_t, - b: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7591,43 +3906,10 @@ pub unsafe fn vcopy_laneq_u8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_u8( - a: uint8x8_t, - b: uint8x16_t, -) -> uint8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 4); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7648,39 +3930,10 @@ pub unsafe fn vcopy_laneq_u16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_u16( - a: uint16x4_t, - b: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 3); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7699,37 +3952,10 @@ pub unsafe fn vcopy_laneq_u32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_u32( - a: uint32x2_t, - b: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 2); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7754,43 +3980,10 @@ pub unsafe fn vcopy_laneq_p8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_p8( - a: poly8x8_t, - b: poly8x16_t, -) -> poly8x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 4); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -7811,39 +4004,10 @@ pub unsafe fn vcopy_laneq_p16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopy_laneq_p16( - a: poly16x4_t, - b: poly16x8_t, -) -> poly16x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 3); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -7864,39 +4028,10 @@ pub unsafe fn vcopyq_lane_f32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_f32( - a: float32x4_t, - b: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -7915,36 +4050,10 @@ pub unsafe fn vcopyq_lane_f64( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_f64( - a: float64x2_t, - b: float64x1_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -7963,36 +4072,10 @@ pub unsafe fn vcopyq_lane_s64( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s64( - a: int64x2_t, - b: int64x1_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -8011,36 +4094,10 @@ pub unsafe fn vcopyq_lane_u64( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u64( - a: uint64x2_t, - b: uint64x1_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -8059,36 +4116,10 @@ pub unsafe fn vcopyq_lane_p64( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p64( - a: poly64x2_t, - b: poly64x1_t, -) -> poly64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert!(LANE2 == 0); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] @@ -8457,67 +4488,116 @@ pub unsafe fn vcopyq_lane_s8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s8( - a: int8x16_t, - b: int8x8_t, -) -> int8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 3); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 +pub unsafe fn vcopyq_lane_s16( + a: int16x8_t, + b: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_s32( + a: int32x4_t, + b: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_lane_u8( + a: uint8x16_t, + b: uint8x8_t, +) -> uint8x16_t { + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); + let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + match LANE1 & 0b1111 { + 0 => simd_shuffle!( + a, + b, + [ + 16 + LANE2 as u32, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 + ] + ), + 1 => simd_shuffle!( + a, + b, + [ + 0, + 16 + LANE2 as u32, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15 ] ), 2 => simd_shuffle!( @@ -8829,30 +4909,24 @@ pub unsafe fn vcopyq_lane_s8( ] ), _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s16( - a: int16x8_t, - b: int16x4_t, -) -> int16x8_t { +pub unsafe fn vcopyq_lane_u16( + a: uint16x8_t, + b: uint16x4_t, +) -> uint16x8_t { static_assert_uimm_bits!(LANE1, 3); static_assert_uimm_bits!(LANE2, 2); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -8866,54 +4940,21 @@ pub unsafe fn vcopyq_lane_s16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s16( - a: int16x8_t, - b: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s32( - a: int32x4_t, - b: int32x2_t, -) -> int32x4_t { +pub unsafe fn vcopyq_lane_u32( + a: uint32x4_t, + b: uint32x2_t, +) -> uint32x4_t { static_assert_uimm_bits!(LANE1, 2); static_assert_uimm_bits!(LANE2, 1); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -8923,50 +4964,21 @@ pub unsafe fn vcopyq_lane_s32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_s32( - a: int32x4_t, - b: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u8( - a: uint8x16_t, - b: uint8x8_t, -) -> uint8x16_t { +pub unsafe fn vcopyq_lane_p8( + a: poly8x16_t, + b: poly8x8_t, +) -> poly8x16_t { static_assert_uimm_bits!(LANE1, 4); static_assert_uimm_bits!(LANE2, 3); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!( a, @@ -9324,25 +5336,93 @@ pub unsafe fn vcopyq_lane_u8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u8( - a: uint8x16_t, - b: uint8x8_t, -) -> uint8x16_t { +pub unsafe fn vcopyq_lane_p16( + a: poly16x8_t, + b: poly16x4_t, +) -> poly16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_f32( + a: float32x4_t, + b: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_f64( + a: float64x2_t, + b: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_s8( + a: int8x16_t, + b: int8x16_t, +) -> int8x16_t { static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 3); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = match LANE1 & 0b1111 { + static_assert_uimm_bits!(LANE2, 4); + match LANE1 & 0b1111 { 0 => simd_shuffle!( a, b, @@ -9696,30 +5776,23 @@ pub unsafe fn vcopyq_lane_u8( ] ), _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u16( - a: uint16x8_t, - b: uint16x4_t, -) -> uint16x8_t { +pub unsafe fn vcopyq_laneq_s16( + a: int16x8_t, + b: int16x8_t, +) -> int16x8_t { static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -9733,54 +5806,20 @@ pub unsafe fn vcopyq_lane_u16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u16( - a: uint16x8_t, - b: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u32( - a: uint32x4_t, - b: uint32x2_t, -) -> uint32x4_t { +pub unsafe fn vcopyq_laneq_s32( + a: int32x4_t, + b: int32x4_t, +) -> int32x4_t { static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 1); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); + static_assert_uimm_bits!(LANE2, 2); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -9790,50 +5829,41 @@ pub unsafe fn vcopyq_lane_u32( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_u32( - a: uint32x4_t, - b: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE1, 2); +pub unsafe fn vcopyq_laneq_s64( + a: int64x2_t, + b: int64x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE1, 1); static_assert_uimm_bits!(LANE2, 1); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p8( - a: poly8x16_t, - b: poly8x8_t, -) -> poly8x16_t { +pub unsafe fn vcopyq_laneq_u8( + a: uint8x16_t, + b: uint8x16_t, +) -> uint8x16_t { static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 3); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + static_assert_uimm_bits!(LANE2, 4); match LANE1 & 0b1111 { 0 => simd_shuffle!( a, @@ -10191,25 +6221,92 @@ pub unsafe fn vcopyq_lane_p8( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p8( +pub unsafe fn vcopyq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); + match LANE1 & 0b111 { + 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), + 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), + 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), + 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), + 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), + 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); + match LANE1 & 0b11 { + 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), + 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), + 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_u64( + a: uint64x2_t, + b: uint64x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), + _ => unreachable_unchecked(), + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] +#[rustc_legacy_const_generics(1, 3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcopyq_laneq_p8( a: poly8x16_t, - b: poly8x8_t, + b: poly8x16_t, ) -> poly8x16_t { static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 3); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = match LANE1 & 0b1111 { + static_assert_uimm_bits!(LANE2, 4); + match LANE1 & 0b1111 { 0 => simd_shuffle!( a, b, @@ -10563,30 +6660,23 @@ pub unsafe fn vcopyq_lane_p8( ] ), _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p16( +pub unsafe fn vcopyq_laneq_p16( a: poly16x8_t, - b: poly16x4_t, + b: poly16x8_t, ) -> poly16x8_t { static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -10600,40673 +6690,20565 @@ pub unsafe fn vcopyq_lane_p16( } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_lane_p16( - a: poly16x8_t, - b: poly16x4_t, -) -> poly16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 2); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), +pub unsafe fn vcopyq_laneq_p64( + a: poly64x2_t, + b: poly64x2_t, +) -> poly64x2_t { + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); + match LANE1 & 0b1 { + 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), + 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_f32( - a: float32x4_t, - b: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), +pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { + transmute(a) +} +#[doc = "Floating-point convert to lower precision narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { + simd_cast(a) +} +#[doc = "Floating-point convert to higher precision long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { + simd_cast(a) +} +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { + simd_cast(a) +} +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { + simd_cast(a) +} +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { + simd_cast(a) +} +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ucvtf))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { + simd_cast(a) +} +#[doc = "Floating-point convert to lower precision narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtn))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { + simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) +} +#[doc = "Floating-point convert to higher precision long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { + let b: float32x2_t = simd_shuffle!(a, a, [2, 3]); + simd_cast(b) +} +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64" + )] + fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t; } + _vcvt_n_f64_s64(a, N) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_f32( - a: float32x4_t, - b: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" + )] + fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t; + } + _vcvtq_n_f64_s64(a, N) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_f64( - a: float64x2_t, - b: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), +pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64" + )] + fn _vcvt_n_f64_u64(a: int64x1_t, n: i32) -> float64x1_t; } + _vcvt_n_f64_u64(a.as_signed(), N) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_f64( - a: float64x2_t, - b: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" + )] + fn _vcvtq_n_f64_u64(a: int64x2_t, n: i32) -> float64x2_t; + } + _vcvtq_n_f64_u64(a.as_signed(), N) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s8( - a: int8x16_t, - b: int8x16_t, -) -> int8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s8( - a: int8x16_t, - b: int8x16_t, -) -> int8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s16( - a: int16x8_t, - b: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s16( - a: int16x8_t, - b: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s32( - a: int32x4_t, - b: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s32( - a: int32x4_t, - b: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s64( - a: int64x2_t, - b: int64x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_s64( - a: int64x2_t, - b: int64x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u8( - a: uint8x16_t, - b: uint8x16_t, -) -> uint8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u8( - a: uint8x16_t, - b: uint8x16_t, -) -> uint8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE1, 2); - static_assert_uimm_bits!(LANE2, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = match LANE1 & 0b11 { - 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), - 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u64( - a: uint64x2_t, - b: uint64x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_u64( - a: uint64x2_t, - b: uint64x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p8( - a: poly8x16_t, - b: poly8x16_t, -) -> poly8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p8( - a: poly8x16_t, - b: poly8x16_t, -) -> poly8x16_t { - static_assert_uimm_bits!(LANE1, 4); - static_assert_uimm_bits!(LANE2, 4); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = match LANE1 & 0b1111 { - 0 => simd_shuffle!( - a, - b, - [ - 16 + LANE2 as u32, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 1 => simd_shuffle!( - a, - b, - [ - 0, - 16 + LANE2 as u32, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 2 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 16 + LANE2 as u32, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 3 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 16 + LANE2 as u32, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 4 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 16 + LANE2 as u32, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 5 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 16 + LANE2 as u32, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 6 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 16 + LANE2 as u32, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 7 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 16 + LANE2 as u32, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 8 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 16 + LANE2 as u32, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 9 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16 + LANE2 as u32, - 10, - 11, - 12, - 13, - 14, - 15 - ] - ), - 10 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 16 + LANE2 as u32, - 11, - 12, - 13, - 14, - 15 - ] - ), - 11 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 16 + LANE2 as u32, - 12, - 13, - 14, - 15 - ] - ), - 12 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 16 + LANE2 as u32, - 13, - 14, - 15 - ] - ), - 13 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 16 + LANE2 as u32, - 14, - 15 - ] - ), - 14 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 16 + LANE2 as u32, - 15 - ] - ), - 15 => simd_shuffle!( - a, - b, - [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 16 + LANE2 as u32 - ] - ), - _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p16( - a: poly16x8_t, - b: poly16x8_t, -) -> poly16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p16( - a: poly16x8_t, - b: poly16x8_t, -) -> poly16x8_t { - static_assert_uimm_bits!(LANE1, 3); - static_assert_uimm_bits!(LANE2, 3); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = match LANE1 & 0b111 { - 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), - 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), - 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), - 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), - 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), - 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), - 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p64( - a: poly64x2_t, - b: poly64x2_t, -) -> poly64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))] -#[rustc_legacy_const_generics(1, 3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcopyq_laneq_p64( - a: poly64x2_t, - b: poly64x2_t, -) -> poly64x2_t { - static_assert_uimm_bits!(LANE1, 1); - static_assert_uimm_bits!(LANE2, 1); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = match LANE1 & 0b1 { - 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), - 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcreate_f64(a: u64) -> float64x1_t { - transmute(a) -} -#[doc = "Floating-point convert to lower precision narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { - simd_cast(a) -} -#[doc = "Floating-point convert to lower precision narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to higher precision long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { - simd_cast(a) -} -#[doc = "Floating-point convert to higher precision long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { - simd_cast(a) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { - simd_cast(a) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { - simd_cast(a) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { - simd_cast(a) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to lower precision narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { - simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to lower precision narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x4_t = simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to higher precision long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { - let b: float32x2_t = simd_shuffle!(a, a, [2, 3]); - simd_cast(b) -} -#[doc = "Floating-point convert to higher precision long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x2_t = simd_shuffle!(a, a, [2, 3]); - let ret_val: float64x2_t = simd_cast(b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64" - )] - fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t; - } - _vcvt_n_f64_s64(a, N) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" - )] - fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t; - } - _vcvtq_n_f64_s64(a, N) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64" - )] - fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vcvtq_n_f64_s64(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64" - )] - fn _vcvt_n_f64_u64(a: int64x1_t, n: i32) -> float64x1_t; - } - _vcvt_n_f64_u64(a.as_signed(), N) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" - )] - fn _vcvtq_n_f64_u64(a: int64x2_t, n: i32) -> float64x2_t; - } - _vcvtq_n_f64_u64(a.as_signed(), N) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" - )] - fn _vcvtq_n_f64_u64(a: int64x2_t, n: i32) -> float64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vcvtq_n_f64_u64(a.as_signed(), N); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64" - )] - fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t; - } - _vcvt_n_s64_f64(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" - )] - fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t; - } - _vcvtq_n_s64_f64(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" - )] - fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vcvtq_n_s64_f64(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64" - )] - fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> int64x1_t; - } - _vcvt_n_u64_f64(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" - )] - fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> int64x2_t; - } - _vcvtq_n_u64_f64(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" - )] - fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = _vcvtq_n_u64_f64(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v1i64.v1f64" - )] - fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvt_s64_f64(a) -} -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v2i64.v2f64" - )] - fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtq_s64_f64(a) -} -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v2i64.v2f64" - )] - fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vcvtq_s64_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v1i64.v1f64" - )] - fn _vcvt_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvt_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v2i64.v2f64" - )] - fn _vcvtq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtq_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v2i64.v2f64" - )] - fn _vcvtq_u64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = _vcvtq_u64_f64(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" - )] - fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvta_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" - )] - fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcvta_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" - )] - fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtaq_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" - )] - fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vcvtaq_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64" - )] - fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvta_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" - )] - fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtaq_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" - )] - fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vcvtaq_s64_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" - )] - fn _vcvta_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvta_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" - )] - fn _vcvta_u32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vcvta_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" - )] - fn _vcvtaq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtaq_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" - )] - fn _vcvtaq_u32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcvtaq_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64" - )] - fn _vcvta_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvta_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" - )] - fn _vcvtaq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtaq_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" - )] - fn _vcvtaq_u64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = _vcvtaq_u64_f64(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.i32.f32" - )] - fn _vcvtas_s32_f32(a: f32) -> i32; - } - _vcvtas_s32_f32(a) -} -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtas))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtas.i64.f64" - )] - fn _vcvtad_s64_f64(a: f64) -> i64; - } - _vcvtad_s64_f64(a) -} -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.i32.f32" - )] - fn _vcvtas_u32_f32(a: f32) -> i32; - } - _vcvtas_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtau))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtau.i64.f64" - )] - fn _vcvtad_u64_f64(a: f64) -> i64; - } - _vcvtad_u64_f64(a).as_unsigned() -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { - a as f64 -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { - a as f32 -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" - )] - fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtm_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" - )] - fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcvtm_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" - )] - fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtmq_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" - )] - fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vcvtmq_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64" - )] - fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtm_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" - )] - fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtmq_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" - )] - fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vcvtmq_s64_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" - )] - fn _vcvtm_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtm_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" - )] - fn _vcvtm_u32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vcvtm_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" - )] - fn _vcvtmq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtmq_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" - )] - fn _vcvtmq_u32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcvtmq_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64" - )] - fn _vcvtm_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtm_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" - )] - fn _vcvtmq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtmq_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" - )] - fn _vcvtmq_u64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = _vcvtmq_u64_f64(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.i32.f32" - )] - fn _vcvtms_s32_f32(a: f32) -> i32; - } - _vcvtms_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtms))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtms.i64.f64" - )] - fn _vcvtmd_s64_f64(a: f64) -> i64; - } - _vcvtmd_s64_f64(a) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.i32.f32" - )] - fn _vcvtms_u32_f32(a: f32) -> i32; - } - _vcvtms_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtmu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtmu.i64.f64" - )] - fn _vcvtmd_u64_f64(a: f64) -> i64; - } - _vcvtmd_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" - )] - fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtn_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" - )] - fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcvtn_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" - )] - fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtnq_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" - )] - fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vcvtnq_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64" - )] - fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtn_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" - )] - fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtnq_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" - )] - fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vcvtnq_s64_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" - )] - fn _vcvtn_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtn_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" - )] - fn _vcvtn_u32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vcvtn_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" - )] - fn _vcvtnq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtnq_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" - )] - fn _vcvtnq_u32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcvtnq_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64" - )] - fn _vcvtn_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtn_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" - )] - fn _vcvtnq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtnq_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" - )] - fn _vcvtnq_u64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = _vcvtnq_u64_f64(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.i32.f32" - )] - fn _vcvtns_s32_f32(a: f32) -> i32; - } - _vcvtns_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtns))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtns.i64.f64" - )] - fn _vcvtnd_s64_f64(a: f64) -> i64; - } - _vcvtnd_s64_f64(a) -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.i32.f32" - )] - fn _vcvtns_u32_f32(a: f32) -> i32; - } - _vcvtns_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtnu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtnu.i64.f64" - )] - fn _vcvtnd_u64_f64(a: f64) -> i64; - } - _vcvtnd_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" - )] - fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtp_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" - )] - fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcvtp_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" - )] - fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtpq_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" - )] - fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vcvtpq_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64" - )] - fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtp_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" - )] - fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtpq_s64_f64(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" - )] - fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vcvtpq_s64_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" - )] - fn _vcvtp_u32_f32(a: float32x2_t) -> int32x2_t; - } - _vcvtp_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" - )] - fn _vcvtp_u32_f32(a: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vcvtp_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" - )] - fn _vcvtpq_u32_f32(a: float32x4_t) -> int32x4_t; - } - _vcvtpq_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" - )] - fn _vcvtpq_u32_f32(a: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcvtpq_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64" - )] - fn _vcvtp_u64_f64(a: float64x1_t) -> int64x1_t; - } - _vcvtp_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" - )] - fn _vcvtpq_u64_f64(a: float64x2_t) -> int64x2_t; - } - _vcvtpq_u64_f64(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" - )] - fn _vcvtpq_u64_f64(a: float64x2_t) -> int64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = _vcvtpq_u64_f64(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.i32.f32" - )] - fn _vcvtps_s32_f32(a: f32) -> i32; - } - _vcvtps_s32_f32(a) -} -#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtps))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtps.i64.f64" - )] - fn _vcvtpd_s64_f64(a: f64) -> i64; - } - _vcvtpd_s64_f64(a) -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.i32.f32" - )] - fn _vcvtps_u32_f32(a: f32) -> i32; - } - _vcvtps_u32_f32(a).as_unsigned() -} -#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtpu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtpu.i64.f64" - )] - fn _vcvtpd_u64_f64(a: f64) -> i64; - } - _vcvtpd_u64_f64(a).as_unsigned() -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { - a as f32 -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { - a as f64 -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32" - )] - fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32; - } - _vcvts_n_f32_s32(a, N) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64" - )] - fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64; - } - _vcvtd_n_f64_s64(a, N) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32" - )] - fn _vcvts_n_f32_u32(a: i32, n: i32) -> f32; - } - _vcvts_n_f32_u32(a.as_signed(), N) -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64" - )] - fn _vcvtd_n_f64_u64(a: i64, n: i32) -> f64; - } - _vcvtd_n_f64_u64(a.as_signed(), N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32" - )] - fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32; - } - _vcvts_n_s32_f32(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64" - )] - fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64; - } - _vcvtd_n_s64_f64(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32" - )] - fn _vcvts_n_u32_f32(a: f32, n: i32) -> i32; - } - _vcvts_n_u32_f32(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64" - )] - fn _vcvtd_n_u64_f64(a: f64, n: i32) -> i64; - } - _vcvtd_n_u64_f64(a, N).as_unsigned() -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { - a as i32 -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzs))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { - a as i64 -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { - a as u32 -} -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtzu))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { - a as u64 -} -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" - )] - fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t; - } - _vcvtx_f32_f64(a) -} -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" - )] - fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vcvtx_f32_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { - simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x4_t = simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { - simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) -} -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_div(a, b) -} -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_div(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_div(a, b) -} -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_div(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - simd_div(a, b) -} -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_div(a, b) -} -#[doc = "Divide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fdiv))] -pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_div(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(sdot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdot_laneq_s32( - a: int32x2_t, - b: int8x8_t, - c: int8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_s32(a, b, transmute(c)) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(sdot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdot_laneq_s32( - a: int32x2_t, - b: int8x8_t, - c: int8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int32x4_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(sdot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdotq_laneq_s32( - a: int32x4_t, - b: int8x16_t, - c: int8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_s32(a, b, transmute(c)) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(sdot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdotq_laneq_s32( - a: int32x4_t, - b: int8x16_t, - c: int8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int32x4_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vdotq_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(udot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdot_laneq_u32( - a: uint32x2_t, - b: uint8x8_t, - c: uint8x16_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_u32(a, b, transmute(c)) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(udot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdot_laneq_u32( - a: uint32x2_t, - b: uint8x8_t, - c: uint8x16_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint32x4_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(udot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdotq_laneq_u32( - a: uint32x4_t, - b: uint8x16_t, - c: uint8x16_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_u32(a, b, transmute(c)) -} -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(test, assert_instr(udot, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] -pub unsafe fn vdotq_laneq_u32( - a: uint32x4_t, - b: uint8x16_t, - c: uint8x16_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint32x4_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: uint32x4_t = vdotq_u32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { - static_assert!(N == 0); - a -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { - static_assert!(N == 0); - a -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(N, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - transmute::(simd_extract!(a, N as u32)) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { - static_assert_uimm_bits!(N, 1); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - transmute::(simd_extract!(a, N as u32)) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { - static_assert_uimm_bits!(N, 3); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { - static_assert_uimm_bits!(N, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { - static_assert_uimm_bits!(N, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { - static_assert_uimm_bits!(N, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { - static_assert_uimm_bits!(N, 3); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { - static_assert_uimm_bits!(N, 3); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 4))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { - static_assert_uimm_bits!(N, 3); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { - static_assert_uimm_bits!(N, 4); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { - static_assert_uimm_bits!(N, 4); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { - static_assert_uimm_bits!(N, 4); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { - static_assert_uimm_bits!(N, 4); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { - static_assert_uimm_bits!(N, 4); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 8))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { - static_assert_uimm_bits!(N, 4); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { - static_assert!(N == 0); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { - static_assert!(N == 0); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_lane_u64(a: uint64x1_t) -> u64 { - static_assert!(N == 0); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { - static_assert!(N == 0); - let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 0))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { - static_assert!(N == 0); - let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(N, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(dup, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(N, 1); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { - static_assert_uimm_bits!(N, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { - static_assert_uimm_bits!(N, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { - static_assert_uimm_bits!(N, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { - static_assert_uimm_bits!(N, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { - static_assert_uimm_bits!(N, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { - static_assert_uimm_bits!(N, 1); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 1))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { - static_assert_uimm_bits!(N, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { - static_assert_uimm_bits!(N, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { - static_assert_uimm_bits!(N, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { - static_assert_uimm_bits!(N, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { - static_assert_uimm_bits!(N, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { - static_assert_uimm_bits!(N, 2); - simd_extract!(a, N as u32) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { - static_assert_uimm_bits!(N, 2); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - simd_extract!(a, N as u32) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v16i8" - )] - fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - _veor3q_s8(a, b, c) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v16i8" - )] - fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _veor3q_s8(a, b, c); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v8i16" - )] - fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _veor3q_s16(a, b, c) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v8i16" - )] - fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _veor3q_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v4i32" - )] - fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _veor3q_s32(a, b, c) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v4i32" - )] - fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = _veor3q_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v2i64" - )] - fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _veor3q_s64(a, b, c) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3s.v2i64" - )] - fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = _veor3q_s64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v16i8" - )] - fn _veor3q_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v16i8" - )] - fn _veor3q_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v8i16" - )] - fn _veor3q_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v8i16" - )] - fn _veor3q_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = - _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v4i32" - )] - fn _veor3q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v4i32" - )] - fn _veor3q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v2i64" - )] - fn _veor3q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() -} -#[doc = "Three-way exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -#[cfg_attr(test, assert_instr(eor3))] -pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.eor3u.v2i64" - )] - fn _veor3q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = - _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ext, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } -} -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ext, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - static_assert_uimm_bits!(N, 1); - let ret_val: float64x2_t = match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ext, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } -} -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ext, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - static_assert_uimm_bits!(N, 1); - let ret_val: poly64x2_t = match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmadd))] -pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.v1f64" - )] - fn _vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; - } - _vfma_f64(b, c, a) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, -) -> float64x1_t { - static_assert!(LANE == 0); - vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, -) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfma_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, -) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmadd))] -pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { - vfma_f64(a, b, vdup_n_f64(c)) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f64" - )] - fn _vfmad_lane_f64(a: f64, b: f64, c: f64) -> f64; - } - static_assert!(LANE == 0); - let c: f64 = simd_extract!(c, LANE as u32); - _vfmad_lane_f64(b, c, a) -} -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmla))] -pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.v2f64" - )] - fn _vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - _vfmaq_f64(b, c, a) -} -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmla))] -pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.v2f64" - )] - fn _vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = _vfmaq_f64(b, c, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x1_t, -) -> float64x2_t { - static_assert!(LANE == 0); - vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmla, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmaq_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x1_t, -) -> float64x2_t { - static_assert!(LANE == 0); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmla))] -pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { - vfmaq_f64(a, b, vdupq_n_f64(c)) -} -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmla))] -pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = vfmaq_f64(a, b, vdupq_n_f64(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f32" - )] - fn _vfmas_lane_f32(a: f32, b: f32, c: f32) -> f32; - } - static_assert_uimm_bits!(LANE, 1); - let c: f32 = simd_extract!(c, LANE as u32); - _vfmas_lane_f32(b, c, a) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f32" - )] - fn _vfmas_lane_f32(a: f32, b: f32, c: f32) -> f32; - } - static_assert_uimm_bits!(LANE, 1); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: f32 = simd_extract!(c, LANE as u32); - _vfmas_lane_f32(b, c, a) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f32" - )] - fn _vfmas_laneq_f32(a: f32, b: f32, c: f32) -> f32; - } - static_assert_uimm_bits!(LANE, 2); - let c: f32 = simd_extract!(c, LANE as u32); - _vfmas_laneq_f32(b, c, a) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f32" - )] - fn _vfmas_laneq_f32(a: f32, b: f32, c: f32) -> f32; - } - static_assert_uimm_bits!(LANE, 2); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: f32 = simd_extract!(c, LANE as u32); - _vfmas_laneq_f32(b, c, a) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f64" - )] - fn _vfmad_laneq_f64(a: f64, b: f64, c: f64) -> f64; - } - static_assert_uimm_bits!(LANE, 1); - let c: f64 = simd_extract!(c, LANE as u32); - _vfmad_laneq_f64(b, c, a) -} -#[doc = "Floating-point fused multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fma.f64" - )] - fn _vfmad_laneq_f64(a: f64, b: f64, c: f64) -> f64; - } - static_assert_uimm_bits!(LANE, 1); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let c: f64 = simd_extract!(c, LANE as u32); - _vfmad_laneq_f64(b, c, a) -} -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - let b: float64x1_t = simd_neg(b); - vfma_f64(a, b, c) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, -) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, -) -> float64x1_t { - static_assert!(LANE == 0); - vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, -) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, -) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { - vfms_f64(a, b, vdup_n_f64(c)) -} -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - let b: float64x2_t = simd_neg(b); - vfmaq_f64(a, b, c) -} -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let b: float64x2_t = simd_neg(b); - let ret_val: float64x2_t = vfmaq_f64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x1_t, -) -> float64x2_t { - static_assert!(LANE == 0); - vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x1_t, -) -> float64x2_t { - static_assert!(LANE == 0); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { - vfmsq_f64(a, b, vdupq_n_f64(c)) -} -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmls))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = vfmsq_f64(a, b, vdupq_n_f64(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - vfmas_lane_f32::(a, -b, c) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - vfmas_lane_f32::(a, -b, c) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - vfmas_laneq_f32::(a, -b, c) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vfmas_laneq_f32::(a, -b, c) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { - vfmad_lane_f64::(a, -b, c) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - vfmad_laneq_f64::(a, -b, c) -} -#[doc = "Floating-point fused multiply-subtract to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - vfmad_laneq_f64::(a, -b, c) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - let ret_val: float32x2_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { - let ret_val: float32x4_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { - let ret_val: float64x2_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { - let ret_val: int8x8_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { - let ret_val: int8x16_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { - let ret_val: int16x4_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { - let ret_val: int16x8_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { - let ret_val: int32x2_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { - let ret_val: int32x4_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { - let ret_val: int64x2_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { - let ret_val: uint8x8_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { - let ret_val: uint8x16_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { - let ret_val: uint16x4_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { - let ret_val: uint16x8_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { - let ret_val: uint32x2_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { - let ret_val: uint32x4_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { - let ret_val: uint64x2_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { - let ret_val: poly8x8_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { - let ret_val: poly8x16_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { - let ret_val: poly16x4_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { - let ret_val: poly16x8_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { - crate::ptr::read_unaligned(ptr.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ldr))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { - let ret_val: poly64x2_t = crate::ptr::read_unaligned(ptr.cast()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64" - )] - fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t; - } - _vld1_f64_x2(a) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64" - )] - fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t; - } - _vld1_f64_x3(a) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64" - )] - fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t; - } - _vld1_f64_x4(a) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" - )] - fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t; - } - _vld1q_f64_x2(a) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" - )] - fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t; - } - let mut ret_val: float64x2x2_t = _vld1q_f64_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" - )] - fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t; - } - _vld1q_f64_x3(a) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" - )] - fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t; - } - let mut ret_val: float64x2x3_t = _vld1q_f64_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" - )] - fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t; - } - _vld1q_f64_x4(a) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" - )] - fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t; - } - let mut ret_val: float64x2x4_t = _vld1q_f64_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64" - )] - fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t; - } - _vld2_dup_f64(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" - )] - fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t; - } - _vld2q_dup_f64(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" - )] - fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t; - } - let mut ret_val: float64x2x2_t = _vld2q_dup_f64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" - )] - fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t; - } - _vld2q_dup_s64(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" - )] - fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t; - } - let mut ret_val: int64x2x2_t = _vld2q_dup_s64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64" - )] - fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t; - } - _vld2_f64(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8" - )] - fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t; - } - _vld2_lane_f64(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8" - )] - fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t; - } - _vld2_lane_s64(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t { - static_assert!(LANE == 0); - transmute(vld2_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t { - static_assert!(LANE == 0); - transmute(vld2_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { - transmute(vld2q_dup_s64(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { - let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { - transmute(vld2q_dup_s64(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { - let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" - )] - fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t; - } - _vld2q_f64(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" - )] - fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t; - } - let mut ret_val: float64x2x2_t = _vld2q_f64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" - )] - fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t; - } - _vld2q_s64(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" - )] - fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t; - } - let mut ret_val: int64x2x2_t = _vld2q_s64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" - )] - fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) - -> float64x2x2_t; - } - _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" - )] - fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) - -> float64x2x2_t; - } - let mut b: float64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: float64x2x2_t = _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" - )] - fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t; - } - _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" - )] - fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t; - } - let mut b: int8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: int8x16x2_t = _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" - )] - fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t; - } - _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" - )] - fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t; - } - let mut b: int64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: int64x2x2_t = _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld2q_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: poly64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: poly64x2x2_t = transmute(vld2q_lane_s64::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld2q_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - let mut b: uint8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: uint8x16x2_t = transmute(vld2q_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld2q_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: uint64x2x2_t = transmute(vld2q_lane_s64::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld2q_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { - static_assert_uimm_bits!(LANE, 4); - let mut b: poly8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: poly8x16x2_t = transmute(vld2q_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { - transmute(vld2q_s64(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { - let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { - transmute(vld2q_s64(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { - let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64" - )] - fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t; - } - _vld3_dup_f64(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" - )] - fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t; - } - _vld3q_dup_f64(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" - )] - fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t; - } - let mut ret_val: float64x2x3_t = _vld3q_dup_f64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" - )] - fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t; - } - _vld3q_dup_s64(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" - )] - fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t; - } - let mut ret_val: int64x2x3_t = _vld3q_dup_s64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64" - )] - fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t; - } - _vld3_f64(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8" - )] - fn _vld3_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - n: i64, - ptr: *const i8, - ) -> float64x1x3_t; - } - _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t { - static_assert!(LANE == 0); - transmute(vld3_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8" - )] - fn _vld3_lane_s64( - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - n: i64, - ptr: *const i8, - ) -> int64x1x3_t; - } - _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t { - static_assert!(LANE == 0); - transmute(vld3_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { - transmute(vld3q_dup_s64(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { - let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { - transmute(vld3q_dup_s64(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { - let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" - )] - fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t; - } - _vld3q_f64(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" - )] - fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t; - } - let mut ret_val: float64x2x3_t = _vld3q_f64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" - )] - fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t; - } - _vld3q_s64(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" - )] - fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t; - } - let mut ret_val: int64x2x3_t = _vld3q_s64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" - )] - fn _vld3q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - n: i64, - ptr: *const i8, - ) -> float64x2x3_t; - } - _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" - )] - fn _vld3q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - n: i64, - ptr: *const i8, - ) -> float64x2x3_t; - } - let mut b: float64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: float64x2x3_t = _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3q_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: poly64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: poly64x2x3_t = transmute(vld3q_lane_s64::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" - )] - fn _vld3q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - n: i64, - ptr: *const i8, - ) -> int8x16x3_t; - } - _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" - )] - fn _vld3q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - n: i64, - ptr: *const i8, - ) -> int8x16x3_t; - } - let mut b: int8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: int8x16x3_t = _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" - )] - fn _vld3q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - n: i64, - ptr: *const i8, - ) -> int64x2x3_t; - } - _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" - )] - fn _vld3q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - n: i64, - ptr: *const i8, - ) -> int64x2x3_t; - } - let mut b: int64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: int64x2x3_t = _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld3q_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { - static_assert_uimm_bits!(LANE, 4); - let mut b: uint8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: uint8x16x3_t = transmute(vld3q_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3q_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: uint64x2x3_t = transmute(vld3q_lane_s64::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld3q_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { - static_assert_uimm_bits!(LANE, 4); - let mut b: poly8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: poly8x16x3_t = transmute(vld3q_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { - transmute(vld3q_s64(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { - let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { - transmute(vld3q_s64(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { - let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64" - )] - fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t; - } - _vld4_dup_f64(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" - )] - fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t; - } - _vld4q_dup_f64(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" - )] - fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t; - } - let mut ret_val: float64x2x4_t = _vld4q_dup_f64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" - )] - fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t; - } - _vld4q_dup_s64(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" - )] - fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t; - } - let mut ret_val: int64x2x4_t = _vld4q_dup_s64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64" - )] - fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t; - } - _vld4_f64(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8" - )] - fn _vld4_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - d: float64x1_t, - n: i64, - ptr: *const i8, - ) -> float64x1x4_t; - } - _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8" - )] - fn _vld4_lane_s64( - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - d: int64x1_t, - n: i64, - ptr: *const i8, - ) -> int64x1x4_t; - } - _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t { - static_assert!(LANE == 0); - transmute(vld4_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t { - static_assert!(LANE == 0); - transmute(vld4_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { - transmute(vld4q_dup_s64(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { - let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { - transmute(vld4q_dup_s64(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { - let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" - )] - fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t; - } - _vld4q_f64(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" - )] - fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t; - } - let mut ret_val: float64x2x4_t = _vld4q_f64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" - )] - fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t; - } - _vld4q_s64(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" - )] - fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t; - } - let mut ret_val: int64x2x4_t = _vld4q_s64(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" - )] - fn _vld4q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - n: i64, - ptr: *const i8, - ) -> float64x2x4_t; - } - _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" - )] - fn _vld4q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - n: i64, - ptr: *const i8, - ) -> float64x2x4_t; - } - let mut b: float64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: float64x2x4_t = _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" - )] - fn _vld4q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - n: i64, - ptr: *const i8, - ) -> int8x16x4_t; - } - _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" - )] - fn _vld4q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - n: i64, - ptr: *const i8, - ) -> int8x16x4_t; - } - let mut b: int8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: int8x16x4_t = _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" - )] - fn _vld4q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - d: int64x2_t, - n: i64, - ptr: *const i8, - ) -> int64x2x4_t; - } - _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" - )] - fn _vld4q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - d: int64x2_t, - n: i64, - ptr: *const i8, - ) -> int64x2x4_t; - } - let mut b: int64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: int64x2x4_t = _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4q_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: poly64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: poly64x2x4_t = transmute(vld4q_lane_s64::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld4q_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { - static_assert_uimm_bits!(LANE, 4); - let mut b: uint8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: uint8x16x4_t = transmute(vld4q_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4q_lane_s64::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: uint64x2x4_t = transmute(vld4q_lane_s64::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { - static_assert_uimm_bits!(LANE, 4); - transmute(vld4q_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { - static_assert_uimm_bits!(LANE, 4); - let mut b: poly8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let mut ret_val: poly8x16x4_t = transmute(vld4q_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { - transmute(vld4q_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { - let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { - transmute(vld4q_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { - let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmax))] -pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v1f64" - )] - fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - _vmax_f64(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmax))] -pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f64" - )] - fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vmaxq_f64(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmax))] -pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f64" - )] - fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vmaxq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnm))] -pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v1f64" - )] - fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - _vmaxnm_f64(a, b) -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnm))] -pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f64" - )] - fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vmaxnmq_f64(a, b) -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnm))] -pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f64" - )] - fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vmaxnmq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" - )] - fn _vmaxnmv_f32(a: float32x2_t) -> f32; - } - _vmaxnmv_f32(a) -} -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" - )] - fn _vmaxnmv_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vmaxnmv_f32(a) -} -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" - )] - fn _vmaxnmvq_f64(a: float64x2_t) -> f64; - } - _vmaxnmvq_f64(a) -} -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" - )] - fn _vmaxnmvq_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vmaxnmvq_f64(a) -} -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmv))] -pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" - )] - fn _vmaxnmvq_f32(a: float32x4_t) -> f32; - } - _vmaxnmvq_f32(a) -} -#[doc = "Floating-point maximum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxnmv))] -pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" - )] - fn _vmaxnmvq_f32(a: float32x4_t) -> f32; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vmaxnmvq_f32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" - )] - fn _vmaxv_f32(a: float32x2_t) -> f32; - } - _vmaxv_f32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" - )] - fn _vmaxv_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vmaxv_f32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxv))] -pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32" - )] - fn _vmaxvq_f32(a: float32x4_t) -> f32; - } - _vmaxvq_f32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxv))] -pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32" - )] - fn _vmaxvq_f32(a: float32x4_t) -> f32; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vmaxvq_f32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" - )] - fn _vmaxvq_f64(a: float64x2_t) -> f64; - } - _vmaxvq_f64(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" - )] - fn _vmaxvq_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vmaxvq_f64(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" - )] - fn _vmaxv_s8(a: int8x8_t) -> i8; - } - _vmaxv_s8(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" - )] - fn _vmaxv_s8(a: int8x8_t) -> i8; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vmaxv_s8(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" - )] - fn _vmaxvq_s8(a: int8x16_t) -> i8; - } - _vmaxvq_s8(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" - )] - fn _vmaxvq_s8(a: int8x16_t) -> i8; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - _vmaxvq_s8(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" - )] - fn _vmaxv_s16(a: int16x4_t) -> i16; - } - _vmaxv_s16(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" - )] - fn _vmaxv_s16(a: int16x4_t) -> i16; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vmaxv_s16(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" - )] - fn _vmaxvq_s16(a: int16x8_t) -> i16; - } - _vmaxvq_s16(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" - )] - fn _vmaxvq_s16(a: int16x8_t) -> i16; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vmaxvq_s16(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" - )] - fn _vmaxv_s32(a: int32x2_t) -> i32; - } - _vmaxv_s32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" - )] - fn _vmaxv_s32(a: int32x2_t) -> i32; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - _vmaxv_s32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" - )] - fn _vmaxvq_s32(a: int32x4_t) -> i32; - } - _vmaxvq_s32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxv))] -pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" - )] - fn _vmaxvq_s32(a: int32x4_t) -> i32; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vmaxvq_s32(a) -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" - )] - fn _vmaxv_u8(a: int8x8_t) -> i8; - } - _vmaxv_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" - )] - fn _vmaxv_u8(a: int8x8_t) -> i8; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vmaxv_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" - )] - fn _vmaxvq_u8(a: int8x16_t) -> i8; - } - _vmaxvq_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" - )] - fn _vmaxvq_u8(a: int8x16_t) -> i8; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - _vmaxvq_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" - )] - fn _vmaxv_u16(a: int16x4_t) -> i16; - } - _vmaxv_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" - )] - fn _vmaxv_u16(a: int16x4_t) -> i16; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vmaxv_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" - )] - fn _vmaxvq_u16(a: int16x8_t) -> i16; - } - _vmaxvq_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" - )] - fn _vmaxvq_u16(a: int16x8_t) -> i16; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vmaxvq_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" - )] - fn _vmaxv_u32(a: int32x2_t) -> i32; - } - _vmaxv_u32(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" - )] - fn _vmaxv_u32(a: int32x2_t) -> i32; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - _vmaxv_u32(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" - )] - fn _vmaxvq_u32(a: int32x4_t) -> i32; - } - _vmaxvq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector max."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxv))] -pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" - )] - fn _vmaxvq_u32(a: int32x4_t) -> i32; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vmaxvq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmin))] -pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v1f64" - )] - fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - _vmin_f64(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmin))] -pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f64" - )] - fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vminq_f64(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmin))] -pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f64" - )] - fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vminq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminnm))] -pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v1f64" - )] - fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - _vminnm_f64(a, b) -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminnm))] -pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f64" - )] - fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vminnmq_f64(a, b) -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminnm))] -pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f64" - )] - fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vminnmq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" - )] - fn _vminnmv_f32(a: float32x2_t) -> f32; - } - _vminnmv_f32(a) -} -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" - )] - fn _vminnmv_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vminnmv_f32(a) -} -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" - )] - fn _vminnmvq_f64(a: float64x2_t) -> f64; - } - _vminnmvq_f64(a) -} -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" - )] - fn _vminnmvq_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vminnmvq_f64(a) -} -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" - )] - fn _vminnmvq_f32(a: float32x4_t) -> f32; - } - _vminnmvq_f32(a) -} -#[doc = "Floating-point minimum number across vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmv))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" - )] - fn _vminnmvq_f32(a: float32x4_t) -> f32; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vminnmvq_f32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f32.v2f32" - )] - fn _vminv_f32(a: float32x2_t) -> f32; - } - _vminv_f32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f32.v2f32" - )] - fn _vminv_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vminv_f32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminv))] -pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f32.v4f32" - )] - fn _vminvq_f32(a: float32x4_t) -> f32; - } - _vminvq_f32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminv))] -pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f32.v4f32" - )] - fn _vminvq_f32(a: float32x4_t) -> f32; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vminvq_f32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f64.v2f64" - )] - fn _vminvq_f64(a: float64x2_t) -> f64; - } - _vminvq_f64(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f64.v2f64" - )] - fn _vminvq_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vminvq_f64(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i8.v8i8" - )] - fn _vminv_s8(a: int8x8_t) -> i8; - } - _vminv_s8(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i8.v8i8" - )] - fn _vminv_s8(a: int8x8_t) -> i8; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vminv_s8(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i8.v16i8" - )] - fn _vminvq_s8(a: int8x16_t) -> i8; - } - _vminvq_s8(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i8.v16i8" - )] - fn _vminvq_s8(a: int8x16_t) -> i8; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - _vminvq_s8(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i16.v4i16" - )] - fn _vminv_s16(a: int16x4_t) -> i16; - } - _vminv_s16(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i16.v4i16" - )] - fn _vminv_s16(a: int16x4_t) -> i16; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vminv_s16(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i16.v8i16" - )] - fn _vminvq_s16(a: int16x8_t) -> i16; - } - _vminvq_s16(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i16.v8i16" - )] - fn _vminvq_s16(a: int16x8_t) -> i16; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vminvq_s16(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i32.v2i32" - )] - fn _vminv_s32(a: int32x2_t) -> i32; - } - _vminv_s32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i32.v2i32" - )] - fn _vminv_s32(a: int32x2_t) -> i32; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - _vminv_s32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i32.v4i32" - )] - fn _vminvq_s32(a: int32x4_t) -> i32; - } - _vminvq_s32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminv))] -pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i32.v4i32" - )] - fn _vminvq_s32(a: int32x4_t) -> i32; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vminvq_s32(a) -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i8.v8i8" - )] - fn _vminv_u8(a: int8x8_t) -> i8; - } - _vminv_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i8.v8i8" - )] - fn _vminv_u8(a: int8x8_t) -> i8; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vminv_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i8.v16i8" - )] - fn _vminvq_u8(a: int8x16_t) -> i8; - } - _vminvq_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i8.v16i8" - )] - fn _vminvq_u8(a: int8x16_t) -> i8; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - _vminvq_u8(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i16.v4i16" - )] - fn _vminv_u16(a: int16x4_t) -> i16; - } - _vminv_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i16.v4i16" - )] - fn _vminv_u16(a: int16x4_t) -> i16; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vminv_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i16.v8i16" - )] - fn _vminvq_u16(a: int16x8_t) -> i16; - } - _vminvq_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i16.v8i16" - )] - fn _vminvq_u16(a: int16x8_t) -> i16; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - _vminvq_u16(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i32.v2i32" - )] - fn _vminv_u32(a: int32x2_t) -> i32; - } - _vminv_u32(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i32.v2i32" - )] - fn _vminv_u32(a: int32x2_t) -> i32; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - _vminv_u32(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i32.v4i32" - )] - fn _vminvq_u32(a: int32x4_t) -> i32; - } - _vminvq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Horizontal vector min."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminv))] -pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i32.v4i32" - )] - fn _vminvq_u32(a: int32x4_t) -> i32; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - _vminvq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlal_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmlal_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vmlal_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmlal_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlal_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlal_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = vmlal_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmlal_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vmlal_high_s16(a, b, vdupq_n_s16(c)) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmlal_high_s16(a, b, vdupq_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vmlal_high_s32(a, b, vdupq_n_s32(c)) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmlal_high_s32(a, b, vdupq_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { - vmlal_high_u16(a, b, vdupq_n_u16(c)) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlal_high_u16(a, b, vdupq_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { - vmlal_high_u32(a, b, vdupq_n_u32(c)) -} -#[doc = "Multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmlal_high_u32(a, b, vdupq_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlal_s8(a, b, c) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = vmlal_s8(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlal_s16(a, b, c) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - let ret_val: int32x4_t = vmlal_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlal_s32(a, b, c) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); - let ret_val: int64x2_t = vmlal_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlal_u8(a, b, c) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = vmlal_u8(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlal_u16(a, b, c) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlal_u16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlal_u32(a, b, c) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); - let ret_val: uint64x2_t = vmlal_u32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float64x2_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlsl_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmlsl_high_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vmlsl_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmlsl_high_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlsl_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlsl_high_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = vmlsl_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmlsl_high_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vmlsl_high_s16(a, b, vdupq_n_s16(c)) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmlsl_high_s16(a, b, vdupq_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vmlsl_high_s32(a, b, vdupq_n_s32(c)) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmlsl_high_s32(a, b, vdupq_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { - vmlsl_high_u16(a, b, vdupq_n_u16(c)) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlsl_high_u16(a, b, vdupq_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { - vmlsl_high_u32(a, b, vdupq_n_u32(c)) -} -#[doc = "Multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmlsl_high_u32(a, b, vdupq_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlsl_s8(a, b, c) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = vmlsl_s8(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlsl_s16(a, b, c) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - let ret_val: int32x4_t = vmlsl_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlsl_s32(a, b, c) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); - let ret_val: int64x2_t = vmlsl_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - vmlsl_u8(a, b, c) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = vmlsl_u8(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - vmlsl_u16(a, b, c) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlsl_u16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); - vmlsl_u32(a, b, c) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umlsl2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); - let ret_val: uint64x2_t = vmlsl_u32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vmovl_s8(a) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = vmovl_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vmovl_s16(a) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let ret_val: int32x4_t = vmovl_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - vmovl_s32(a) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sxtl2))] -pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let ret_val: int64x2_t = vmovl_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vmovl_u8(a) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = vmovl_u8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vmovl_u16(a) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let ret_val: uint32x4_t = vmovl_u16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - vmovl_u32(a) -} -#[doc = "Vector move"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uxtl2))] -pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let ret_val: uint64x2_t = vmovl_u32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - let c: int8x8_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_cast(b); - let ret_val: int8x16_t = - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - let c: int16x4_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_cast(b); - let ret_val: int16x8_t = simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - let c: int32x2_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_cast(b); - let ret_val: int32x4_t = simd_shuffle!(a, c, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - let c: uint8x8_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_cast(b); - let ret_val: uint8x16_t = - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - let c: uint16x4_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_cast(b); - let ret_val: uint16x8_t = simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - let c: uint32x2_t = simd_cast(b); - simd_shuffle!(a, c, [0, 1, 2, 3]) -} -#[doc = "Extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(xtn2))] -pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_cast(b); - let ret_val: uint32x4_t = simd_shuffle!(a, c, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmul))] -pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmul))] -pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmul))] -pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - static_assert!(LANE == 0); - simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { - simd_mul(a, vdup_n_f64(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { - simd_mul(a, vdupq_n_f64(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_mul(a, vdupq_n_f64(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { - static_assert!(LANE == 0); - let b: f64 = simd_extract!(b, LANE as u32); - a * b -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_s16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmull_high_s16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_high_s16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmull_high_s16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_high_s32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vmull_high_s32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_s32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmull_high_s32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_u16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmull_high_u16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_high_u16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmull_high_u16( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_high_u32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = vmull_high_u32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_high_u32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2, LANE = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmull_high_u32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { - vmull_high_s16(a, vdupq_n_s16(b)) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmull_high_s16(a, vdupq_n_s16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { - vmull_high_s32(a, vdupq_n_s32(b)) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(smull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmull_high_s32(a, vdupq_n_s32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { - vmull_high_u16(a, vdupq_n_u16(b)) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmull_high_u16(a, vdupq_n_u16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { - vmull_high_u32(a, vdupq_n_u32(b)) -} -#[doc = "Multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(umull2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmull_high_u32(a, vdupq_n_u32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { - vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) -} -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) -} -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - vmull_p8(a, b) -} -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly16x8_t = vmull_p8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - vmull_s8(a, b) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = vmull_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - vmull_s16(a, b) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let ret_val: int32x4_t = vmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - vmull_s32(a, b) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smull2))] -pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let ret_val: int64x2_t = vmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - vmull_u8(a, b) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = vmull_u8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - vmull_u16(a, b) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let ret_val: uint32x4_t = vmull_u16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - vmull_u32(a, b) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umull2))] -pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let ret_val: uint64x2_t = vmull_u32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] -pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull64" - )] - fn _vmull_p64(a: p64, b: p64) -> int8x16_t; - } - transmute(_vmull_p64(a, b)) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE == 0); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE == 0); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_uimm_bits!(LANE, 1); - let b: f32 = simd_extract!(b, LANE as u32); - a * b -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_uimm_bits!(LANE, 1); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: f32 = simd_extract!(b, LANE as u32); - a * b -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_uimm_bits!(LANE, 2); - let b: f32 = simd_extract!(b, LANE as u32); - a * b -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_uimm_bits!(LANE, 2); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: f32 = simd_extract!(b, LANE as u32); - a * b -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_uimm_bits!(LANE, 1); - let b: f64 = simd_extract!(b, LANE as u32); - a * b -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmul, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_uimm_bits!(LANE, 1); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let b: f64 = simd_extract!(b, LANE as u32); - a * b -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v2f32" - )] - fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmulx_f32(a, b) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v2f32" - )] - fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vmulx_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v4f32" - )] - fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vmulxq_f32(a, b) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v4f32" - )] - fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vmulxq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v1f64" - )] - fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - _vmulx_f64(a, b) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v2f64" - )] - fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vmulxq_f64(a, b) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.v2f64" - )] - fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vmulxq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x2_t = vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmulxq_f32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x4_t = vmulxq_f32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmulxq_f32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = vmulxq_f32( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - static_assert!(LANE == 0); - vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { - static_assert_uimm_bits!(LANE, 1); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.f64" - )] - fn _vmulxd_f64(a: f64, b: f64) -> f64; - } - _vmulxd_f64(a, b) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmulx))] -pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmulx.f32" - )] - fn _vmulxs_f32(a: f32, b: f32) -> f32; - } - _vmulxs_f32(a, b) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { - static_assert!(LANE == 0); - vmulxd_f64(a, simd_extract!(b, LANE as u32)) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_uimm_bits!(LANE, 1); - vmulxd_f64(a, simd_extract!(b, LANE as u32)) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_uimm_bits!(LANE, 1); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - vmulxd_f64(a, simd_extract!(b, LANE as u32)) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_uimm_bits!(LANE, 1); - vmulxs_f32(a, simd_extract!(b, LANE as u32)) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_uimm_bits!(LANE, 1); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - vmulxs_f32(a, simd_extract!(b, LANE as u32)) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_uimm_bits!(LANE, 2); - vmulxs_f32(a, simd_extract!(b, LANE as u32)) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_uimm_bits!(LANE, 2); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - vmulxs_f32(a, simd_extract!(b, LANE as u32)) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE == 0); - vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply extended"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE == 0); - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fneg))] -pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fneg))] -pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fneg))] -pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(neg))] -pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(neg))] -pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(neg))] -pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(neg))] -pub unsafe fn vnegd_s64(a: i64) -> i64 { - a.wrapping_neg() -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { - let a1: f64 = simd_extract!(a, 0); - let a2: f64 = simd_extract!(a, 1); - a1 + a2 -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let a1: f64 = simd_extract!(a, 0); - let a2: f64 = simd_extract!(a, 1); - a1 + a2 -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { - let a1: f32 = simd_extract!(a, 0); - let a2: f32 = simd_extract!(a, 1); - a1 + a2 -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let a1: f32 = simd_extract!(a, 0); - let a2: f32 = simd_extract!(a, 1); - a1 + a2 -} -#[doc = "Add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { - transmute(vaddvq_u64(transmute(a))) -} -#[doc = "Add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(vaddvq_u64(transmute(a))) -} -#[doc = "Add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { - vaddvq_u64(a) -} -#[doc = "Add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - vaddvq_u64(a) -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v4f32" - )] - fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpaddq_f32(a, b) -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v4f32" - )] - fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vpaddq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f64" - )] - fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpaddq_f64(a, b) -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(faddp))] -pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f64" - )] - fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vpaddq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v16i8" - )] - fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vpaddq_s8(a, b) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v16i8" - )] - fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vpaddq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v8i16" - )] - fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vpaddq_s16(a, b) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v8i16" - )] - fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vpaddq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v4i32" - )] - fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vpaddq_s32(a, b) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v4i32" - )] - fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vpaddq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v2i64" - )] - fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vpaddq_s64(a, b) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v2i64" - )] - fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vpaddq_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - transmute(vpaddq_s8(transmute(a), transmute(b))) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - transmute(vpaddq_s16(transmute(a), transmute(b))) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - transmute(vpaddq_s32(transmute(a), transmute(b))) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - transmute(vpaddq_s64(transmute(a), transmute(b))) -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); - let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" - )] - fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpmaxnm_f32(a, b) -} -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" - )] - fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vpmaxnm_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" - )] - fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpmaxnmq_f32(a, b) -} -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" - )] - fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vpmaxnmq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" - )] - fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpmaxnmq_f64(a, b) -} -#[doc = "Floating-point Maximum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" - )] - fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vpmaxnmq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point maximum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" - )] - fn _vpmaxnmqd_f64(a: float64x2_t) -> f64; - } - _vpmaxnmqd_f64(a) -} -#[doc = "Floating-point maximum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" - )] - fn _vpmaxnmqd_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vpmaxnmqd_f64(a) -} -#[doc = "Floating-point maximum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" - )] - fn _vpmaxnms_f32(a: float32x2_t) -> f32; - } - _vpmaxnms_f32(a) -} -#[doc = "Floating-point maximum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fmaxnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" - )] - fn _vpmaxnms_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vpmaxnms_f32(a) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v4f32" - )] - fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpmaxq_f32(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v4f32" - )] - fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vpmaxq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v2f64" - )] - fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpmaxq_f64(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v2f64" - )] - fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vpmaxq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v16i8" - )] - fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vpmaxq_s8(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v16i8" - )] - fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vpmaxq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v8i16" - )] - fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vpmaxq_s16(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v8i16" - )] - fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vpmaxq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v4i32" - )] - fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vpmaxq_s32(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(smaxp))] -pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v4i32" - )] - fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vpmaxq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v16i8" - )] - fn _vpmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vpmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v16i8" - )] - fn _vpmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vpmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v8i16" - )] - fn _vpmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v8i16" - )] - fn _vpmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v4i32" - )] - fn _vpmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(umaxp))] -pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v4i32" - )] - fn _vpmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point maximum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" - )] - fn _vpmaxqd_f64(a: float64x2_t) -> f64; - } - _vpmaxqd_f64(a) -} -#[doc = "Floating-point maximum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" - )] - fn _vpmaxqd_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vpmaxqd_f64(a) -} -#[doc = "Floating-point maximum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" - )] - fn _vpmaxs_f32(a: float32x2_t) -> f32; - } - _vpmaxs_f32(a) -} -#[doc = "Floating-point maximum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fmaxp))] -pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" - )] - fn _vpmaxs_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vpmaxs_f32(a) -} -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v2f32" - )] - fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpminnm_f32(a, b) -} -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v2f32" - )] - fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vpminnm_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v4f32" - )] - fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpminnmq_f32(a, b) -} -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v4f32" - )] - fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vpminnmq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v2f64" - )] - fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpminnmq_f64(a, b) -} -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v2f64" - )] - fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vpminnmq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point minimum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" - )] - fn _vpminnmqd_f64(a: float64x2_t) -> f64; - } - _vpminnmqd_f64(a) -} -#[doc = "Floating-point minimum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" - )] - fn _vpminnmqd_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vpminnmqd_f64(a) -} -#[doc = "Floating-point minimum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" - )] - fn _vpminnms_f32(a: float32x2_t) -> f32; - } - _vpminnms_f32(a) -} -#[doc = "Floating-point minimum number pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" - )] - fn _vpminnms_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vpminnms_f32(a) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminp.v4f32" - )] - fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vpminq_f32(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminp.v4f32" - )] - fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vpminq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminp.v2f64" - )] - fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - _vpminq_f64(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminp.v2f64" - )] - fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vpminq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v16i8" - )] - fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vpminq_s8(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v16i8" - )] - fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vpminq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v8i16" - )] - fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vpminq_s16(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v8i16" - )] - fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vpminq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v4i32" - )] - fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vpminq_s32(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sminp))] -pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v4i32" - )] - fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vpminq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v16i8" - )] - fn _vpminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vpminq_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v16i8" - )] - fn _vpminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vpminq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v8i16" - )] - fn _vpminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v8i16" - )] - fn _vpminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v4i32" - )] - fn _vpminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v4i32" - )] - fn _vpminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point minimum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f64.v2f64" - )] - fn _vpminqd_f64(a: float64x2_t) -> f64; - } - _vpminqd_f64(a) -} -#[doc = "Floating-point minimum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f64.v2f64" - )] - fn _vpminqd_f64(a: float64x2_t) -> f64; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - _vpminqd_f64(a) -} -#[doc = "Floating-point minimum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f32.v2f32" - )] - fn _vpmins_f32(a: float32x2_t) -> f32; - } - _vpmins_f32(a) -} -#[doc = "Floating-point minimum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminv.f32.v2f32" - )] - fn _vpmins_f32(a: float32x2_t) -> f32; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - _vpmins_f32(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v1i64" - )] - fn _vqabs_s64(a: int64x1_t) -> int64x1_t; - } - _vqabs_s64(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v2i64" - )] - fn _vqabsq_s64(a: int64x2_t) -> int64x2_t; - } - _vqabsq_s64(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v2i64" - )] - fn _vqabsq_s64(a: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vqabsq_s64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsb_s8(a: i8) -> i8 { - simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) -} -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsh_s16(a: i16) -> i16 { - simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) -} -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabss_s32(a: i32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.i32" - )] - fn _vqabss_s32(a: i32) -> i32; - } - _vqabss_s32(a) -} -#[doc = "Signed saturating absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] -pub unsafe fn vqabsd_s64(a: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.i64" - )] - fn _vqabsd_s64(a: i64) -> i64; - } - _vqabsd_s64(a) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 { - let a: int8x8_t = vdup_n_s8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqadd_s8(a, b), 0) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqadd_s16(a, b), 0) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 { - let a: uint8x8_t = vdup_n_u8(a); - let b: uint8x8_t = vdup_n_u8(b); - simd_extract!(vqadd_u8(a, b), 0) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { - let a: uint16x4_t = vdup_n_u16(a); - let b: uint16x4_t = vdup_n_u16(b); - simd_extract!(vqadd_u16(a, b), 0) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.i32" - )] - fn _vqadds_s32(a: i32, b: i32) -> i32; - } - _vqadds_s32(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqadd))] -pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.i64" - )] - fn _vqaddd_s64(a: i64, b: i64) -> i64; - } - _vqaddd_s64(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { +pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.i32" + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64" )] - fn _vqadds_u32(a: i32, b: i32) -> i32; + fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t; } - _vqadds_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vcvt_n_s64_f64(a, N) } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqadd))] -pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { +pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.i64" + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64" )] - fn _vqaddd_u64(a: i64, b: i64) -> i64; - } - _vqaddd_u64(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vqaddq_s32(a, vqdmull_high_n_s16(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_n_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - vqaddq_s32(a, vqdmull_high_s16(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_high_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vqaddq_s64(a, vqdmull_high_n_s32(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_n_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - vqaddq_s64(a, vqdmull_high_s32(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_high_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, N = 2))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqaddq_s32(a, vqdmull_laneq_s16::(b, c)) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, N = 2))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_laneq_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t; + } + _vcvtq_n_s64_f64(a, N) } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s64(a, vqdmull_laneq_s32::(b, c)) +pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64" + )] + fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> int64x1_t; + } + _vcvt_n_u64_f64(a, N).as_unsigned() } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlal_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_laneq_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" + )] + fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> int64x2_t; + } + _vcvtq_n_u64_f64(a, N).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v1i64.v1f64" + )] + fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvt_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v2i64.v2f64" + )] + fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtq_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_uimm_bits!(LANE, 3); - vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v1i64.v1f64" + )] + fn _vcvt_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvt_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v2i64.v2f64" + )] + fn _vcvtq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtq_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_uimm_bits!(LANE, 1); - vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32" + )] + fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvta_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32" + )] + fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtaq_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_uimm_bits!(LANE, 2); - vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64" + )] + fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvta_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64" + )] + fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtaq_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal))] +#[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { - let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); - vqadds_s32(a, simd_extract!(x, 0)) +pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" + )] + fn _vcvta_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvta_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal))] +#[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { - let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c)); - x as i64 +pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" + )] + fn _vcvtaq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtaq_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)) +pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64" + )] + fn _vcvta_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvta_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" + )] + fn _vcvtaq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtaq_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)) +pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.i32.f32" + )] + fn _vcvtas_s32_f32(a: f32) -> i32; + } + _vcvtas_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtas.i64.f64" + )] + fn _vcvtad_s64_f64(a: f64) -> i64; + } + _vcvtad_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)) +pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.i32.f32" + )] + fn _vcvtas_u32_f32(a: f32) -> i32; + } + _vcvtas_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] +#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtau.i64.f64" + )] + fn _vcvtad_u64_f64(a: f64) -> i64; + } + _vcvtad_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(scvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)) +pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 { + a as f64 } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(scvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvts_f32_s32(a: i32) -> f32 { + a as f32 } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - vqsubq_s32(a, vqdmull_high_n_s16(b, c)) +pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32" + )] + fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtm_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_n_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32" + )] + fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtmq_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - vqsubq_s32(a, vqdmull_high_s16(b, c)) +pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64" + )] + fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtm_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_high_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64" + )] + fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtmq_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - vqsubq_s64(a, vqdmull_high_n_s32(b, c)) +pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" + )] + fn _vcvtm_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtm_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_n_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" + )] + fn _vcvtmq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtmq_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - vqsubq_s64(a, vqdmull_high_s32(b, c)) +pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64" + )] + fn _vcvtm_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtm_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl2))] +#[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_high_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64" + )] + fn _vcvtmq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtmq_u64_f64(a).as_unsigned() } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - vqsubq_s32(a, vqdmull_laneq_s16::(b, c)) +pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.i32.f32" + )] + fn _vcvtms_s32_f32(a: f32) -> i32; + } + _vcvtms_s32_f32(a) } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_laneq_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtms.i64.f64" + )] + fn _vcvtmd_s64_f64(a: f64) -> i64; + } + _vcvtmd_s64_f64(a) } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s64(a, vqdmull_laneq_s32::(b, c)) +pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.i32.f32" + )] + fn _vcvtms_u32_f32(a: f32) -> i32; + } + _vcvtms_u32_f32(a).as_unsigned() } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsl_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_laneq_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtmu.i64.f64" + )] + fn _vcvtmd_u64_f64(a: f64) -> i64; + } + _vcvtmd_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32" + )] + fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtn_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32" + )] + fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtnq_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_uimm_bits!(LANE, 3); - vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64" + )] + fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtn_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64" + )] + fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtnq_s64_f64(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_uimm_bits!(LANE, 1); - vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" + )] + fn _vcvtn_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtn_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" + )] + fn _vcvtnq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtnq_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_uimm_bits!(LANE, 2); - vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64" + )] + fn _vcvtn_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtn_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] -#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) +pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" + )] + fn _vcvtnq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtnq_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl))] +#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { - let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); - vqsubs_s32(a, simd_extract!(x, 0)) +pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.i32.f32" + )] + fn _vcvtns_s32_f32(a: f32) -> i32; + } + _vcvtns_s32_f32(a) } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"] +#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl))] +#[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { - let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c)); - x as i64 +pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtns.i64.f64" + )] + fn _vcvtnd_s64_f64(a: f64) -> i64; + } + _vcvtnd_s64_f64(a) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) +pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.i32.f32" + )] + fn _vcvtns_u32_f32(a: f32) -> i32; + } + _vcvtns_u32_f32(a).as_unsigned() } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtnu.i64.f64" + )] + fn _vcvtnd_u64_f64(a: f64) -> i64; + } + _vcvtnd_u64_f64(a).as_unsigned() } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) +pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32" + )] + fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtp_s32_f32(a) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32" + )] + fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtpq_s32_f32(a) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) +pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64" + )] + fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtp_s64_f64(a) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64" + )] + fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtpq_s64_f64(a) } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) +pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" + )] + fn _vcvtp_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvtp_u32_f32(a).as_unsigned() } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" + )] + fn _vcvtpq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtpq_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_uimm_bits!(N, 2); - let b: i16 = simd_extract!(b, N as u32); - vqdmulhh_s16(a, b) +pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64" + )] + fn _vcvtp_u64_f64(a: float64x1_t) -> int64x1_t; + } + _vcvtp_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_uimm_bits!(N, 2); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: i16 = simd_extract!(b, N as u32); - vqdmulhh_s16(a, b) +pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" + )] + fn _vcvtpq_u64_f64(a: float64x2_t) -> int64x2_t; + } + _vcvtpq_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_uimm_bits!(N, 3); - let b: i16 = simd_extract!(b, N as u32); - vqdmulhh_s16(a, b) +pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.i32.f32" + )] + fn _vcvtps_s32_f32(a: f32) -> i32; + } + _vcvtps_s32_f32(a) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] +#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_uimm_bits!(N, 3); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i16 = simd_extract!(b, N as u32); - vqdmulhh_s16(a, b) +pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtps.i64.f64" + )] + fn _vcvtpd_s64_f64(a: f64) -> i64; + } + _vcvtpd_s64_f64(a) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh))] +#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqdmulh_s16(a, b), 0) +pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.i32.f32" + )] + fn _vcvtps_u32_f32(a: f32) -> i32; + } + _vcvtps_u32_f32(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"] +#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh))] +#[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { - let a: int32x2_t = vdup_n_s32(a); - let b: int32x2_t = vdup_n_s32(b); - simd_extract!(vqdmulh_s32(a, b), 0) +pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtpu.i64.f64" + )] + fn _vcvtpd_u64_f64(a: f64) -> i64; + } + _vcvtpd_u64_f64(a).as_unsigned() } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(ucvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { - static_assert_uimm_bits!(N, 1); - let b: i32 = simd_extract!(b, N as u32); - vqdmulhs_s32(a, b) +pub unsafe fn vcvts_f32_u32(a: u32) -> f32 { + a as f32 } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(ucvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { - static_assert_uimm_bits!(N, 1); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: i32 = simd_extract!(b, N as u32); - vqdmulhs_s32(a, b) +pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 { + a as f64 } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - let b: i32 = simd_extract!(b, N as u32); - vqdmulhs_s32(a, b) +pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32" + )] + fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32; + } + _vcvts_n_f32_s32(a, N) } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: i32 = simd_extract!(b, N as u32); - vqdmulhs_s32(a, b) +pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64" + )] + fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64; + } + _vcvtd_n_f64_s64(a, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) +pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32" + )] + fn _vcvts_n_f32_u32(a: i32, n: i32) -> f32; + } + _vcvts_n_f32_u32(a.as_signed(), N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - let ret_val: int32x4_t = vqdmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64" + )] + fn _vcvtd_n_f64_u64(a: i64, n: i32) -> f64; + } + _vcvtd_n_f64_u64(a.as_signed(), N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) +pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32" + )] + fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32; + } + _vcvts_n_s32_f32(a, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - let ret_val: int64x2_t = vqdmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64" + )] + fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64; + } + _vcvtd_n_s64_f64(a, N) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) +pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32" + )] + fn _vcvts_n_u32_f32(a: f32, n: i32) -> i32; + } + _vcvts_n_u32_f32(a, N).as_unsigned() } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - let ret_val: int64x2_t = vqdmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64" + )] + fn _vcvtd_n_u64_f64(a: f64, n: i32) -> i64; + } + _vcvtd_n_u64_f64(a, N).as_unsigned() } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 4))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) +pub unsafe fn vcvts_s32_f32(a: f32) -> i32 { + a as i32 } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2, N = 4))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - let ret_val: int32x4_t = vqdmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 { + a as i64 } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = vdup_n_s16(b); - vqdmull_s16(a, b) +pub unsafe fn vcvts_u32_f32(a: f32) -> u32 { + a as u32 } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = vdup_n_s16(b); - let ret_val: int32x4_t = vqdmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 { + a as u64 } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = vdup_n_s32(b); - vqdmull_s32(a, b) +pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64" + )] + fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t; + } + _vcvtx_f32_f64(a) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = vdup_n_s32(b); - let ret_val: int64x2_t = vqdmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { + simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] +#[doc = "Floating-point convert to lower precision narrow, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] +#[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - vqdmull_s16(a, b) +pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 { + simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let ret_val: int32x4_t = vqdmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_div(a, b) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - vqdmull_s32(a, b) +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_div(a, b) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let ret_val: int64x2_t = vqdmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + simd_div(a, b) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 4))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) +#[cfg_attr(test, assert_instr(fdiv))] +pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_div(a, b) +} +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(sdot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdot_laneq_s32( + a: int32x2_t, + b: int8x8_t, + c: int8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_s32(a, b, transmute(c)) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 4))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(N, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - let ret_val: int32x4_t = vqdmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(sdot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdotq_laneq_s32( + a: int32x4_t, + b: int8x16_t, + c: int8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_s32(a, b, transmute(c)) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(udot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdot_laneq_u32( + a: uint32x2_t, + b: uint8x8_t, + c: uint8x16_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_u32(a, b, transmute(c)) } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(N, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - let ret_val: int64x2_t = vqdmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(test, assert_instr(udot, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] +pub unsafe fn vdotq_laneq_u32( + a: uint32x4_t, + b: uint8x16_t, + c: uint8x16_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_u32(a, b, transmute(c)) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - let b: i16 = simd_extract!(b, N as u32); - vqdmullh_s16(a, b) +pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { + static_assert!(N == 0); + a } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { - static_assert_uimm_bits!(N, 2); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: i16 = simd_extract!(b, N as u32); - vqdmullh_s16(a, b) +pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { + static_assert!(N == 0); + a } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { - static_assert_uimm_bits!(N, 2); - let b: i32 = simd_extract!(b, N as u32); - vqdmulls_s32(a, b) +pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { - static_assert_uimm_bits!(N, 2); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: i32 = simd_extract!(b, N as u32); - vqdmulls_s32(a, b) +pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 4))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { +pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { static_assert_uimm_bits!(N, 3); - let b: i16 = simd_extract!(b, N as u32); - vqdmullh_s16(a, b) + simd_extract!(a, N as u32) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 4))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { +pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { static_assert_uimm_bits!(N, 3); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: i16 = simd_extract!(b, N as u32); - vqdmullh_s16(a, b) + simd_extract!(a, N as u32) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull))] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqdmull_s16(a, b), 0) +pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { - static_assert_uimm_bits!(N, 1); - let b: i32 = simd_extract!(b, N as u32); - vqdmulls_s32(a, b) +pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { - static_assert_uimm_bits!(N, 1); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: i32 = simd_extract!(b, N as u32); - vqdmulls_s32(a, b) +pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmull))] +#[cfg_attr(test, assert_instr(nop, N = 4))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulls.scalar" - )] - fn _vqdmulls_s32(a: i32, b: i32) -> i64; - } - _vqdmulls_s32(a, b) +pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { + static_assert_uimm_bits!(N, 3); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - simd_shuffle!( - a, - vqmovn_s16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { + static_assert_uimm_bits!(N, 4); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x16_t = simd_shuffle!( - a, - vqmovn_s16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { + static_assert_uimm_bits!(N, 4); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 8))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { + static_assert_uimm_bits!(N, 4); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { + static_assert!(N == 0); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) +pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { + static_assert!(N == 0); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 0))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdupd_lane_u64(a: uint64x1_t) -> u64 { + static_assert!(N == 0); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(dup, N = 0))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - simd_shuffle!( - a, - vqmovn_u16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(dup, N = 0))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vqmovn_u16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(dup, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(dup, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) +pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn2))] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn))] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnd_s64(a: i64) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64" - )] - fn _vqmovnd_s64(a: i64) -> i32; - } - _vqmovnd_s64(a) +pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn))] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnd_u64(a: u64) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64" - )] - fn _vqmovnd_u64(a: i64) -> i32; - } - _vqmovnd_u64(a.as_signed()).as_unsigned() +pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn))] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnh_s16(a: i16) -> i8 { - simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) +pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtn))] +#[cfg_attr(test, assert_instr(nop, N = 1))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovns_s32(a: i32) -> i16 { - simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) +pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { + static_assert_uimm_bits!(N, 1); + simd_extract!(a, N as u32) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn))] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovnh_u16(a: u16) -> u8 { - simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) +pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) } -#[doc = "Saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqxtn))] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovns_u32(a: u32) -> u16 { - simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) +pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - simd_shuffle!( - a, - vqmovun_s16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vqmovun_s16(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] +#[cfg_attr(test, assert_instr(nop, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { + static_assert_uimm_bits!(N, 2); + simd_extract!(a, N as u32) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v16i8" + )] + fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _veor3q_s8(a, b, c) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v8i16" + )] + fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + _veor3q_s16(a, b, c) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovunh_s16(a: i16) -> u8 { - simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v4i32" + )] + fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _veor3q_s32(a, b, c) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovuns_s32(a: i32) -> u16 { - simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3s.v2i64" + )] + fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + _veor3q_s64(a, b, c) } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqxtun))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqmovund_s64(a: i64) -> u32 { - simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.eor3u.v16i8" + )] + fn _veor3q_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v1i64" + link_name = "llvm.aarch64.crypto.eor3u.v8i16" )] - fn _vqneg_s64(a: int64x1_t) -> int64x1_t; + fn _veor3q_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; } - _vqneg_s64(a) + _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v2i64" + link_name = "llvm.aarch64.crypto.eor3u.v4i32" )] - fn _vqnegq_s64(a: int64x2_t) -> int64x2_t; + fn _veor3q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - _vqnegq_s64(a) + _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] +#[doc = "Three-way exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { +#[target_feature(enable = "neon,sha3")] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +#[cfg_attr(test, assert_instr(eor3))] +pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v2i64" + link_name = "llvm.aarch64.crypto.eor3u.v2i64" )] - fn _vqnegq_s64(a: int64x2_t) -> int64x2_t; + fn _veor3q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = _vqnegq_s64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ext, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegb_s8(a: i8) -> i8 { - simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) +pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ext, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegh_s16(a: i16) -> i16 { - simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) +pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegs_s32(a: i32) -> i32 { - simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) +#[cfg_attr(test, assert_instr(fmadd))] +pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.v1f64" + )] + fn _vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; + } + _vfma_f64(b, c, a) } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqneg))] -pub unsafe fn vqnegd_s64(a: i64) -> i64 { - simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) +pub unsafe fn vfma_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlah_s16(a, b, c) + vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) +} +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int16x4_t = vqrdmlah_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_laneq_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, +) -> float64x2_t { static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlah_s32(a, b, c) + vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vqrdmlah_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, +) -> float64x1_t { + static_assert!(LANE == 0); + vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlah_s16(a, b, c) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfma_laneq_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, +) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmadd))] +pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { + vfma_f64(a, b, vdup_n_f64(c)) +} +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int16x4_t = vqrdmlah_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f64" + )] + fn _vfmad_lane_f64(a: f64, b: f64, c: f64) -> f64; + } + static_assert!(LANE == 0); + let c: f64 = simd_extract!(c, LANE as u32); + _vfmad_lane_f64(b, c, a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmla))] +pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.v2f64" + )] + fn _vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + _vfmaq_f64(b, c, a) +} +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlah_s32(a, b, c) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmaq_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, +) -> float64x2_t { + static_assert!(LANE == 0); + vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vqrdmlah_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fmla))] +pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + vfmaq_f64(a, b, vdupq_n_f64(c)) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlahq_s16(a, b, c) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f32" + )] + fn _vfmas_lane_f32(a: f32, b: f32, c: f32) -> f32; + } + static_assert_uimm_bits!(LANE, 1); + let c: f32 = simd_extract!(c, LANE as u32); + _vfmas_lane_f32(b, c, a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f32" + )] + fn _vfmas_laneq_f32(a: f32, b: f32, c: f32) -> f32; + } static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - let ret_val: int16x8_t = vqrdmlahq_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + let c: f32 = simd_extract!(c, LANE as u32); + _vfmas_laneq_f32(b, c, a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] +#[doc = "Floating-point fused multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fma.f64" + )] + fn _vfmad_laneq_f64(a: f64, b: f64, c: f64) -> f64; + } static_assert_uimm_bits!(LANE, 1); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlahq_s32(a, b, c) + let c: f64 = simd_extract!(c, LANE as u32); + _vfmad_laneq_f64(b, c, a) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vqrdmlahq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + let b: float64x1_t = simd_neg(b); + vfma_f64(a, b, c) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlahq_s16(a, b, c) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - let ret_val: int16x8_t = vqrdmlahq_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlahq_s32(a, b, c) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vqrdmlahq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" - )] - fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - _vqrdmlah_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_laneq_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, +) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" - )] - fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqrdmlah_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, +) -> float64x1_t { + static_assert!(LANE == 0); + vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" - )] - fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _vqrdmlahq_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_laneq_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, +) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" - )] - fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqrdmlahq_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { + vfms_f64(a, b, vdup_n_f64(c)) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" - )] - fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - _vqrdmlah_s32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + let b: float64x2_t = simd_neg(b); + vfmaq_f64(a, b, c) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" - )] - fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x2_t = _vqrdmlah_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, +) -> float64x2_t { + static_assert!(LANE == 0); + vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" - )] - fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vqrdmlahq_s32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmls))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + vfmsq_f64(a, b, vdupq_n_f64(c)) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" - )] - fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqrdmlahq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { + vfmas_lane_f32::(a, -b, c) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { + vfmas_laneq_f32::(a, -b, c) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { + vfmad_lane_f64::(a, -b, c) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] +#[doc = "Floating-point fused multiply-subtract to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { + vfmad_laneq_f64::(a, -b, c) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - let c: int16x4_t = vdup_n_s16(c); - simd_extract!(vqrdmlah_s16(a, b, c), 0) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlah))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 { - let a: int32x2_t = vdup_n_s32(a); - let b: int32x2_t = vdup_n_s32(b); - let c: int32x2_t = vdup_n_s32(c); - simd_extract!(vqrdmlah_s32(a, b, c), 0) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlsh_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int16x4_t = vqrdmlsh_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlsh_s32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vqrdmlsh_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlsh_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int16x4_t = vqrdmlsh_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vqrdmlsh_s32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vqrdmlsh_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlshq_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - let ret_val: int16x8_t = vqrdmlshq_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlshq_s32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vqrdmlshq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmlshq_s16(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - let ret_val: int16x8_t = vqrdmlshq_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + crate::ptr::read_unaligned(ptr.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmlshq_s32(a, b, c) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vqrdmlshq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { + crate::ptr::read_unaligned(ptr.cast()) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { + crate::ptr::read_unaligned(ptr.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ldr))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { + crate::ptr::read_unaligned(ptr.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" + link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64" )] - fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t; } - _vqrdmlsh_s16(a, b, c) + _vld1_f64_x2(a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" + link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64" )] - fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t; } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqrdmlsh_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld1_f64_x3(a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" + link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64" )] - fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t; } - _vqrdmlshq_s16(a, b, c) + _vld1_f64_x4(a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" + link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64" )] - fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t; } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqrdmlshq_s16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vld1q_f64_x2(a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" + link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64" )] - fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t; } - _vqrdmlsh_s32(a, b, c) + _vld1q_f64_x3(a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld1))] +pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" + link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64" )] - fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t; } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x2_t = _vqrdmlsh_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld1q_f64_x4(a) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" + link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64" )] - fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t; } - _vqrdmlshq_s32(a, b, c) + _vld2_dup_f64(a as _) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" + link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64" )] - fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t; } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqrdmlshq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld2q_dup_f64(a as _) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64" + )] + fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t; + } + _vld2q_dup_s64(a as _) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64" + )] + fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t; + } + _vld2_f64(a as _) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8" + )] + fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t; + } + _vld2_lane_f64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8" + )] + fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t; + } + _vld2_lane_s64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t { + static_assert!(LANE == 0); + transmute(vld2_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t { + static_assert!(LANE == 0); + transmute(vld2_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { + transmute(vld2q_dup_s64(transmute(a))) } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] -#[rustc_legacy_const_generics(3)] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) -} -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - let c: int16x4_t = vdup_n_s16(c); - simd_extract!(vqrdmlsh_s16(a, b, c), 0) +#[target_feature(enable = "neon,aes")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { + let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "rdm")] -#[cfg_attr(test, assert_instr(sqrdmlsh))] -#[stable(feature = "rdm_intrinsics", since = "1.62.0")] -pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { - let a: int32x2_t = vdup_n_s32(a); - let b: int32x2_t = vdup_n_s32(b); - let c: int32x2_t = vdup_n_s32(c); - simd_extract!(vqrdmlsh_s32(a, b, c), 0) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { + transmute(vld2q_dup_s64(transmute(a))) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { + let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_uimm_bits!(LANE, 2); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64" + )] + fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t; + } + _vld2q_f64(a as _) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64" + )] + fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t; + } + _vld2q_s64(a as _) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_uimm_bits!(LANE, 3); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8" + )] + fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) + -> float64x2x2_t; + } + _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { - static_assert_uimm_bits!(LANE, 1); - vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8" + )] + fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t; + } + _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { +pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { static_assert_uimm_bits!(LANE, 1); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8" + )] + fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t; + } + _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld2q_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { - static_assert_uimm_bits!(LANE, 2); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) +pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld2q_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { - simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) +pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld2q_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrdmulh))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { - simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) +pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld2q_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 { - let a: int8x8_t = vdup_n_s8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqrshl_s8(a, b), 0) +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { + transmute(vld2q_s64(transmute(a))) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqrshl_s16(a, b), 0) +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { + let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 { - let a: uint8x8_t = vdup_n_u8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqrshl_u8(a, b), 0) +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { + transmute(vld2q_s64(transmute(a))) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { - let a: uint16x4_t = vdup_n_u16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqrshl_u16(a, b), 0) +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { + let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.i64" + link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64" )] - fn _vqrshld_s64(a: i64, b: i64) -> i64; + fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t; } - _vqrshld_s64(a, b) + _vld3_dup_f64(a as _) } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.i32" + link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64" )] - fn _vqrshls_s32(a: i32, b: i32) -> i32; + fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t; } - _vqrshls_s32(a, b) + _vld3q_dup_f64(a as _) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.i32" + link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64" )] - fn _vqrshls_u32(a: i32, b: i32) -> i32; + fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t; } - _vqrshls_u32(a.as_signed(), b).as_unsigned() + _vld3q_dup_s64(a as _) } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.i64" + link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64" )] - fn _vqrshld_u64(a: i64, b: i64) -> i64; + fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t; } - _vqrshld_u64(a.as_signed(), b).as_unsigned() + _vld3_f64(a as _) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqrshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8" + )] + fn _vld3_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + n: i64, + ptr: *const i8, + ) -> float64x1x3_t; + } + _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x16_t = simd_shuffle!( - a, - vqrshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t { + static_assert!(LANE == 0); + transmute(vld3_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8" + )] + fn _vld3_lane_s64( + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + n: i64, + ptr: *const i8, + ) -> int64x1x3_t; + } + _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t { + static_assert!(LANE == 0); + transmute(vld3_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { + transmute(vld3q_dup_s64(transmute(a))) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { + let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqrshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { + transmute(vld3q_dup_s64(transmute(a))) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vqrshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { + let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64" + )] + fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t; + } + _vld3q_f64(a as _) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64" + )] + fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t; + } + _vld3q_s64(a as _) } -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { - static_assert!(N >= 1 && N <= 32); - let a: uint64x2_t = vdupq_n_u64(a); - simd_extract!(vqrshrn_n_u64::(a), 0) -} -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { - static_assert!(N >= 1 && N <= 8); - let a: uint16x8_t = vdupq_n_u16(a); - simd_extract!(vqrshrn_n_u16::(a), 0) -} -#[doc = "Unsigned saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { - static_assert!(N >= 1 && N <= 16); - let a: uint32x4_t = vdupq_n_u32(a); - simd_extract!(vqrshrn_n_u32::(a), 0) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { - static_assert!(N >= 1 && N <= 8); - let a: int16x8_t = vdupq_n_s16(a); - simd_extract!(vqrshrn_n_s16::(a), 0) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"] +pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8" + )] + fn _vld3q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + n: i64, + ptr: *const i8, + ) -> float64x2x3_t; + } + _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { - static_assert!(N >= 1 && N <= 16); - let a: int32x4_t = vdupq_n_s32(a); - simd_extract!(vqrshrn_n_s32::(a), 0) +pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3q_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { - static_assert!(N >= 1 && N <= 32); - let a: int64x2_t = vdupq_n_s64(a); - simd_extract!(vqrshrn_n_s64::(a), 0) +pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8" + )] + fn _vld3q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + n: i64, + ptr: *const i8, + ) -> int8x16x3_t; + } + _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqrshrun_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8" + )] + fn _vld3q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + n: i64, + ptr: *const i8, + ) -> int64x2x3_t; + } + _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vqrshrun_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld3q_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3q_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld3q_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { + transmute(vld3q_s64(transmute(a))) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { + let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { - static_assert!(N >= 1 && N <= 32); - let a: int64x2_t = vdupq_n_s64(a); - simd_extract!(vqrshrun_n_s64::(a), 0) +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { + transmute(vld3q_s64(transmute(a))) } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { - static_assert!(N >= 1 && N <= 8); - let a: int16x8_t = vdupq_n_s16(a); - simd_extract!(vqrshrun_n_s16::(a), 0) +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { + let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { - static_assert!(N >= 1 && N <= 16); - let a: int32x4_t = vdupq_n_s32(a); - simd_extract!(vqrshrun_n_s32::(a), 0) +pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64" + )] + fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t; + } + _vld4_dup_f64(a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(vqshl_n_s8::(vdup_n_s8(a)), 0) +pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64" + )] + fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t; + } + _vld4q_dup_f64(a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_n_s64(a: i64) -> i64 { - static_assert_uimm_bits!(N, 6); - simd_extract!(vqshl_n_s64::(vdup_n_s64(a)), 0) +pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64" + )] + fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t; + } + _vld4q_dup_s64(a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { - static_assert_uimm_bits!(N, 4); - simd_extract!(vqshl_n_s16::(vdup_n_s16(a)), 0) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64" + )] + fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t; + } + _vld4_f64(a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_n_s32(a: i32) -> i32 { - static_assert_uimm_bits!(N, 5); - simd_extract!(vqshl_n_s32::(vdup_n_s32(a)), 0) +pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8" + )] + fn _vld4_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + d: float64x1_t, + n: i64, + ptr: *const i8, + ) -> float64x1x4_t; + } + _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(vqshl_n_u8::(vdup_n_u8(a)), 0) +pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8" + )] + fn _vld4_lane_s64( + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + d: int64x1_t, + n: i64, + ptr: *const i8, + ) -> int64x1x4_t; + } + _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_n_u64(a: u64) -> u64 { - static_assert_uimm_bits!(N, 6); - simd_extract!(vqshl_n_u64::(vdup_n_u64(a)), 0) +pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t { + static_assert!(LANE == 0); + transmute(vld4_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { - static_assert_uimm_bits!(N, 4); - simd_extract!(vqshl_n_u16::(vdup_n_u16(a)), 0) +pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t { + static_assert!(LANE == 0); + transmute(vld4_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_n_u32(a: u32) -> u32 { - static_assert_uimm_bits!(N, 5); - simd_extract!(vqshl_n_u32::(vdup_n_u32(a)), 0) +pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { + transmute(vld4q_dup_s64(transmute(a))) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 { - let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b)); - simd_extract!(c, 0) +pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { + let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] +#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 { - let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b)); - simd_extract!(c, 0) +pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { + transmute(vld4q_dup_s64(transmute(a))) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] +#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 { - let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b)); - simd_extract!(c, 0) +pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { + let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 { - let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b)); - simd_extract!(c, 0) +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64" + )] + fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t; + } + _vld4q_f64(a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 { - let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b)); - simd_extract!(c, 0) +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64" + )] + fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t; + } + _vld4q_s64(a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { - let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b)); - simd_extract!(c, 0) +pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8" + )] + fn _vld4q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + n: i64, + ptr: *const i8, + ) -> float64x2x4_t; + } + _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshl))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { +pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.i64" + link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8" )] - fn _vqshld_s64(a: i64, b: i64) -> i64; + fn _vld4q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + n: i64, + ptr: *const i8, + ) -> int8x16x4_t; } - _vqshld_s64(a, b) + _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshl))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { +pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.i64" + link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8" )] - fn _vqshld_u64(a: i64, b: i64) -> i64; + fn _vld4q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + d: int64x2_t, + n: i64, + ptr: *const i8, + ) -> int64x2x4_t; } - _vqshld_u64(a.as_signed(), b).as_unsigned() + _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { - static_assert_uimm_bits!(N, 3); - simd_extract!(vqshlu_n_s8::(vdup_n_s8(a)), 0) +pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4q_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { - static_assert_uimm_bits!(N, 6); - simd_extract!(vqshlu_n_s64::(vdup_n_s64(a)), 0) +pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld4q_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { - static_assert_uimm_bits!(N, 4); - simd_extract!(vqshlu_n_s16::(vdup_n_s16(a)), 0) +pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4q_lane_s64::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { - static_assert_uimm_bits!(N, 5); - simd_extract!(vqshlu_n_s32::(vdup_n_s32(a)), 0) +pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { + static_assert_uimm_bits!(LANE, 4); + transmute(vld4q_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { + transmute(vld4q_s64(transmute(a))) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x16_t = simd_shuffle!( - a, - vqshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { + let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { + transmute(vld4q_s64(transmute(a))) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { + let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(fmax))] +pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v1f64" + )] + fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vmax_f64(a, b) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(fmax))] +pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v2f64" + )] + fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vmaxq_f64(a, b) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(fmaxnm))] +pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v1f64" + )] + fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vmaxnm_f64(a, b) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vqshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(fmaxnm))] +pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v2f64" + )] + fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vmaxnmq_f64(a, b) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" + )] + fn _vmaxnmv_f32(a: float32x2_t) -> f32; + } + _vmaxnmv_f32(a) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" + )] + fn _vmaxnmvq_f64(a: float64x2_t) -> f64; + } + _vmaxnmvq_f64(a) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] +#[doc = "Floating-point maximum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" + )] + fn _vmaxnmvq_f32(a: float32x4_t) -> f32; + } + _vmaxnmvq_f32(a) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" + )] + fn _vmaxv_f32(a: float32x2_t) -> f32; + } + _vmaxv_f32(a) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(test, assert_instr(fmaxv))] +pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.i32" + link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32" )] - fn _vqshrnd_n_s64(a: i64, n: i32) -> i32; + fn _vmaxvq_f32(a: float32x4_t) -> f32; } - _vqshrnd_n_s64(a, N) + _vmaxvq_f32(a) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.i32" + link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" )] - fn _vqshrnd_n_u64(a: i64, n: i32) -> i32; + fn _vmaxvq_f64(a: float64x2_t) -> f64; } - _vqshrnd_n_u64(a.as_signed(), N).as_unsigned() + _vmaxvq_f64(a) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { - static_assert!(N >= 1 && N <= 8); - simd_extract!(vqshrn_n_s16::(vdupq_n_s16(a)), 0) +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" + )] + fn _vmaxv_s8(a: int8x8_t) -> i8; + } + _vmaxv_s8(a) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { - static_assert!(N >= 1 && N <= 16); - simd_extract!(vqshrn_n_s32::(vdupq_n_s32(a)), 0) +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" + )] + fn _vmaxvq_s8(a: int8x16_t) -> i8; + } + _vmaxvq_s8(a) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { - static_assert!(N >= 1 && N <= 8); - simd_extract!(vqshrn_n_u16::(vdupq_n_u16(a)), 0) +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" + )] + fn _vmaxv_s16(a: int16x4_t) -> i16; + } + _vmaxv_s16(a) } -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { - static_assert!(N >= 1 && N <= 16); - simd_extract!(vqshrn_n_u32::(vdupq_n_u32(a)), 0) +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" + )] + fn _vmaxvq_s16(a: int16x8_t) -> i16; + } + _vmaxvq_s16(a) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vqshrun_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" + )] + fn _vmaxv_s32(a: int32x2_t) -> i32; + } + _vmaxv_s32(a) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vqshrun_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(smaxv))] +pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" + )] + fn _vmaxvq_s32(a: int32x4_t) -> i32; + } + _vmaxvq_s32(a) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" + )] + fn _vmaxv_u8(a: int8x8_t) -> i8; + } + _vmaxv_u8(a.as_signed()).as_unsigned() +} +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" + )] + fn _vmaxvq_u8(a: int8x16_t) -> i8; + } + _vmaxvq_u8(a.as_signed()).as_unsigned() } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" + )] + fn _vmaxv_u16(a: int16x4_t) -> i16; + } + _vmaxv_u16(a.as_signed()).as_unsigned() } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" + )] + fn _vmaxvq_u16(a: int16x8_t) -> i16; + } + _vmaxvq_u16(a.as_signed()).as_unsigned() } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" + )] + fn _vmaxv_u32(a: int32x2_t) -> i32; + } + _vmaxv_u32(a.as_signed()).as_unsigned() } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"] +#[doc = "Horizontal vector max."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { - static_assert!(N >= 1 && N <= 32); - simd_extract!(vqshrun_n_s64::(vdupq_n_s64(a)), 0) +#[cfg_attr(test, assert_instr(umaxv))] +pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" + )] + fn _vmaxvq_u32(a: int32x4_t) -> i32; + } + _vmaxvq_u32(a.as_signed()).as_unsigned() } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { - static_assert!(N >= 1 && N <= 8); - simd_extract!(vqshrun_n_s16::(vdupq_n_s16(a)), 0) +#[cfg_attr(test, assert_instr(fmin))] +pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v1f64" + )] + fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vmin_f64(a, b) } -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { - static_assert!(N >= 1 && N <= 16); - simd_extract!(vqshrun_n_s32::(vdupq_n_s32(a)), 0) +#[cfg_attr(test, assert_instr(fmin))] +pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v2f64" + )] + fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vminq_f64(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 { - let a: int8x8_t = vdup_n_s8(a); - let b: int8x8_t = vdup_n_s8(b); - simd_extract!(vqsub_s8(a, b), 0) +#[cfg_attr(test, assert_instr(fminnm))] +pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v1f64" + )] + fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + } + _vminnm_f64(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 { - let a: int16x4_t = vdup_n_s16(a); - let b: int16x4_t = vdup_n_s16(b); - simd_extract!(vqsub_s16(a, b), 0) +#[cfg_attr(test, assert_instr(fminnm))] +pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v2f64" + )] + fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vminnmq_f64(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"] +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 { - let a: uint8x8_t = vdup_n_u8(a); - let b: uint8x8_t = vdup_n_u8(b); - simd_extract!(vqsub_u8(a, b), 0) +pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" + )] + fn _vminnmv_f32(a: float32x2_t) -> f32; + } + _vminnmv_f32(a) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"] +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { - let a: uint16x4_t = vdup_n_u16(a); - let b: uint16x4_t = vdup_n_u16(b); - simd_extract!(vqsub_u16(a, b), 0) +pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" + )] + fn _vminnmvq_f64(a: float64x2_t) -> f64; + } + _vminnmvq_f64(a) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"] +#[doc = "Floating-point minimum number across vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmv))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { +pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.i32" + link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" )] - fn _vqsubs_s32(a: i32, b: i32) -> i32; + fn _vminnmvq_f32(a: float32x4_t) -> f32; } - _vqsubs_s32(a, b) + _vminnmvq_f32(a) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sqsub))] -pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vminv_f32(a: float32x2_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.i64" + link_name = "llvm.aarch64.neon.fminv.f32.v2f32" )] - fn _vqsubd_s64(a: i64, b: i64) -> i64; + fn _vminv_f32(a: float32x2_t) -> f32; } - _vqsubd_s64(a, b) + _vminv_f32(a) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { +#[cfg_attr(test, assert_instr(fminv))] +pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.i32" + link_name = "llvm.aarch64.neon.fminv.f32.v4f32" )] - fn _vqsubs_u32(a: i32, b: i32) -> i32; + fn _vminvq_f32(a: float32x4_t) -> f32; } - _vqsubs_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vminvq_f32(a) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uqsub))] -pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.i64" + link_name = "llvm.aarch64.neon.fminv.f64.v2f64" )] - fn _vqsubd_u64(a: i64, b: i64) -> i64; + fn _vminvq_f64(a: float64x2_t) -> f64; } - _vqsubd_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vminvq_f64(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminv_s8(a: int8x8_t) -> i8 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl1.v8i8" + link_name = "llvm.aarch64.neon.sminv.i8.v8i8" )] - fn _vqtbl1(a: int8x16_t, b: int8x8_t) -> int8x8_t; + fn _vminv_s8(a: int8x8_t) -> i8; } - _vqtbl1(a, b.as_signed()) + _vminv_s8(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl1.v8i8" + link_name = "llvm.aarch64.neon.sminv.i8.v16i8" )] - fn _vqtbl1(a: int8x16_t, b: int8x8_t) -> int8x8_t; + fn _vminvq_s8(a: int8x16_t) -> i8; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbl1(a, b.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vminvq_s8(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminv_s16(a: int16x4_t) -> i16 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl1.v16i8" + link_name = "llvm.aarch64.neon.sminv.i16.v4i16" )] - fn _vqtbl1q(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vminv_s16(a: int16x4_t) -> i16; } - _vqtbl1q(a, b.as_signed()) + _vminv_s16(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl1.v16i8" + link_name = "llvm.aarch64.neon.sminv.i16.v8i16" )] - fn _vqtbl1q(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vminvq_s16(a: int16x8_t) -> i16; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbl1q(a, b.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + _vminvq_s16(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { - vqtbl1(a, b) +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vminv_s32(a: int32x2_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i32.v2i32" + )] + fn _vminv_s32(a: int32x2_t) -> i32; + } + _vminv_s32(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbl1(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(sminv))] +pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminv.i32.v4i32" + )] + fn _vminvq_s32(a: int32x4_t) -> i32; + } + _vminvq_s32(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - vqtbl1q(a, b) +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i8.v8i8" + )] + fn _vminv_u8(a: int8x8_t) -> i8; + } + _vminv_u8(a.as_signed()).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbl1q(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i8.v16i8" + )] + fn _vminvq_u8(a: int8x16_t) -> i8; + } + _vminvq_u8(a.as_signed()).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { - let x = transmute(vqtbl1(transmute(a), b)); - x +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i16.v4i16" + )] + fn _vminv_u16(a: int16x4_t) -> i16; + } + _vminv_u16(a.as_signed()).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = transmute(vqtbl1(transmute(a), b)); - let ret_val: uint8x8_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i16.v8i16" + )] + fn _vminvq_u16(a: int16x8_t) -> i16; + } + _vminvq_u16(a.as_signed()).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let x = transmute(vqtbl1q(transmute(a), b)); - x +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i32.v2i32" + )] + fn _vminv_u32(a: int32x2_t) -> i32; + } + _vminv_u32(a.as_signed()).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] +#[doc = "Horizontal vector min."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let x = transmute(vqtbl1q(transmute(a), b)); - let ret_val: uint8x16_t = x; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(uminv))] +pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminv.i32.v4i32" + )] + fn _vminvq_u32(a: int32x4_t) -> i32; + } + _vminvq_u32(a.as_signed()).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { - let x = transmute(vqtbl1(transmute(a), b)); - x +pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = transmute(vqtbl1(transmute(a), b)); - let ret_val: poly8x8_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { - let x = transmute(vqtbl1q(transmute(a), b)); - x +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmlal_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let x = transmute(vqtbl1q(transmute(a), b)); - let ret_val: poly8x16_t = x; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vmlal_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl2.v8i8" - )] - fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; - } - _vqtbl2(a, b, c.as_signed()) +pub unsafe fn vmlal_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl2.v8i8" - )] - fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbl2(a, b, c.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmlal_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl2.v16i8" - )] - fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - _vqtbl2q(a, b, c.as_signed()) +pub unsafe fn vmlal_high_lane_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl2.v16i8" - )] - fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbl2q(a, b, c.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vmlal_high_laneq_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { - vqtbl2(a.0, a.1, b) +pub unsafe fn vmlal_high_lane_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { - let mut a: int8x16x2_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbl2(a.0, a.1, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmlal_high_laneq_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { - vqtbl2q(a.0, a.1, b) +pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vmlal_high_s16(a, b, vdupq_n_s16(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { - let mut a: int8x16x2_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbl2q(a.0, a.1, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vmlal_high_s32(a, b, vdupq_n_s32(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { - transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) +pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + vmlal_high_u16(a, b, vdupq_n_u16(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] +#[doc = "Multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x16x2_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + vmlal_high_u32(a, b, vdupq_n_u32(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { - transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) +pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlal_s8(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { - let mut a: uint8x16x2_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlal_s16(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { - transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) +pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlal_s32(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x16x2_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlal_u8(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { - transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) +pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlal_u16(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { - let mut a: poly8x16x2_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlal_u32(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl3.v8i8" - )] - fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; - } - _vqtbl3(a, b, c, d.as_signed()) +pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl3.v8i8" - )] - fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbl3(a, b, c, d.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl3.v16i8" - )] - fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; - } - _vqtbl3q(a, b, c, d.as_signed()) +pub unsafe fn vmlsl_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl3.v16i8" - )] - fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbl3q(a, b, c, d.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vmlsl_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_high_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { - vqtbl3(a.0, a.1, a.2, b) +pub unsafe fn vmlsl_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { - let mut a: int8x16x3_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbl3(a.0, a.1, a.2, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmlsl_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { - vqtbl3q(a.0, a.1, a.2, b) +pub unsafe fn vmlsl_high_lane_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { - let mut a: int8x16x3_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbl3q(a.0, a.1, a.2, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vmlsl_high_laneq_u16( + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_high_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { - transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) +pub unsafe fn vmlsl_high_lane_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x16x3_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsl_high_laneq_u32( + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_high_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { - transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) +pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vmlsl_high_s16(a, b, vdupq_n_s16(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { - let mut a: uint8x16x3_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vmlsl_high_s32(a, b, vdupq_n_s32(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { - transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) +pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + vmlsl_high_u16(a, b, vdupq_n_u16(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] +#[doc = "Multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x16x3_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + vmlsl_high_u32(a, b, vdupq_n_u32(c)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { - transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) +pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlsl_s8(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { - let mut a: poly8x16x3_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlsl_s16(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl4.v8i8" - )] - fn _vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) - -> int8x8_t; - } - _vqtbl4(a, b, c, d, e.as_signed()) +pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlsl_s32(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl4.v8i8" - )] - fn _vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) - -> int8x8_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let e: uint8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbl4(a, b, c, d, e.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); + vmlsl_u8(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: uint8x16_t, -) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl4.v16i8" - )] - fn _vqtbl4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - ) -> int8x16_t; - } - _vqtbl4q(a, b, c, d, e.as_signed()) +pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); + vmlsl_u16(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbl4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: uint8x16_t, -) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbl4.v16i8" - )] - fn _vqtbl4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - ) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let e: uint8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbl4q(a, b, c, d, e.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); + vmlsl_u32(a, b, c) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { - vqtbl4(a.0, a.1, a.2, a.3, b) +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vmovl_s8(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { - let mut a: int8x16x4_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.3 = simd_shuffle!( - a.3, - a.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbl4(a.0, a.1, a.2, a.3, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vmovl_s16(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { - vqtbl4q(a.0, a.1, a.2, a.3, b) +#[cfg_attr(test, assert_instr(sxtl2))] +pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + vmovl_s32(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { - let mut a: int8x16x4_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - a.3 = simd_shuffle!( - a.3, - a.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbl4q(a.0, a.1, a.2, a.3, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vmovl_u8(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { - transmute(vqtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )) +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vmovl_u16(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[doc = "Vector move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x16x4_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.3 = simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(uxtl2))] +pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + vmovl_u32(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { - transmute(vqtbl4q( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )) +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + let c: int8x8_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { - let mut a: uint8x16x4_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.3 = simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vqtbl4q( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + let c: int16x4_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { - transmute(vqtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )) +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + let c: int32x2_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3]) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x16x4_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.3 = simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + let c: uint8x8_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { - transmute(vqtbl4q( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )) +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + let c: uint16x4_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] +#[doc = "Extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { - let mut a: poly8x16x4_t = a; - a.0 = simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.1 = simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.2 = simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - a.3 = simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vqtbl4q( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +#[cfg_attr(test, assert_instr(xtn2))] +pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + let c: uint32x2_t = simd_cast(b); + simd_shuffle!(a, c, [0, 1, 2, 3]) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx1.v8i8" - )] - fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; - } - _vqtbx1(a, b, c.as_signed()) +#[cfg_attr(test, assert_instr(fmul))] +pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + simd_mul(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx1.v8i8" - )] - fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbx1(a, b, c.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(fmul))] +pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_mul(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx1.v16i8" - )] - fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - _vqtbx1q(a, b, c.as_signed()) +pub unsafe fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + static_assert!(LANE == 0); + simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx1.v16i8" - )] - fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbx1q(a, b, c.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, transmute::(simd_extract!(b, LANE as u32))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - vqtbx1(a, b, c) +pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t { + simd_mul(a, vdup_n_f64(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbx1(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { + simd_mul(a, vdupq_n_f64(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - vqtbx1q(a, b, c) +pub unsafe fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { + static_assert!(LANE == 0); + let b: f64 = simd_extract!(b, LANE as u32); + a * b } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbx1q(a, b, c); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_s16( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) +} +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_high_s16( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { - let x = transmute(vqtbx1(transmute(a), transmute(b), c)); - x +pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_high_s32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(smull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = transmute(vqtbx1(transmute(a), transmute(b), c)); - let ret_val: uint8x8_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_s32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); - x +pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_u16( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); - let ret_val: uint8x16_t = x; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_high_u16( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { - let x = transmute(vqtbx1(transmute(a), transmute(b), c)); - x +pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_high_u32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(umull2, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = transmute(vqtbx1(transmute(a), transmute(b), c)); - let ret_val: poly8x8_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_high_u32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(smull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { - let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); - x +pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { + vmull_high_s16(a, vdupq_n_s16(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(smull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); - let ret_val: poly8x16_t = x; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { + vmull_high_s32(a, vdupq_n_s32(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(umull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx2.v8i8" - )] - fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; - } - _vqtbx2(a, b, c, d.as_signed()) +pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { + vmull_high_u16(a, vdupq_n_u16(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] +#[doc = "Multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(umull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx2.v8i8" - )] - fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbx2(a, b, c, d.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { + vmull_high_u32(a, vdupq_n_u32(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx2.v16i8" - )] - fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; - } - _vqtbx2q(a, b, c, d.as_signed()) +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { + vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx2.v16i8" - )] - fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbx2q(a, b, c, d.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + vmull_p8(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { - vqtbx2(a, b.0, b.1, c) +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + vmull_s8(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { - let mut b: int8x16x2_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbx2(a, b.0, b.1, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + vmull_s16(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { - vqtbx2q(a, b.0, b.1, c) +#[cfg_attr(test, assert_instr(smull2))] +pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + vmull_s32(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { - let mut b: int8x16x2_t = b; - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbx2q(a, b.0, b.1, c); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + vmull_u8(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { - transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + vmull_u16(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x16x2_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(umull2))] +pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + vmull_u32(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { - transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) +#[cfg_attr(test, assert_instr(pmull))] +pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmull64" + )] + fn _vmull_p64(a: p64, b: p64) -> int8x16_t; + } + transmute(_vmull_p64(a, b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { - let mut b: uint8x16x2_t = b; - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { + static_assert!(LANE == 0); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { - transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) +pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x16x2_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { + static_assert_uimm_bits!(LANE, 1); + let b: f32 = simd_extract!(b, LANE as u32); + a * b } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { - transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) +pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { + static_assert_uimm_bits!(LANE, 2); + let b: f32 = simd_extract!(b, LANE as u32); + a * b } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmul, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { - let mut b: poly8x16x2_t = b; - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { + static_assert_uimm_bits!(LANE, 1); + let b: f64 = simd_extract!(b, LANE as u32); + a * b } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx3.v8i8" + link_name = "llvm.aarch64.neon.fmulx.v2f32" )] - fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) -> int8x8_t; + fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - _vqtbx3(a, b, c, d, e.as_signed()) + _vmulx_f32(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx3.v8i8" + link_name = "llvm.aarch64.neon.fmulx.v4f32" )] - fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) -> int8x8_t; + fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let e: uint8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbx3(a, b, c, d, e.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vmulxq_f32(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx3q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: uint8x16_t, -) -> int8x16_t { +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx3.v16i8" + link_name = "llvm.aarch64.neon.fmulx.v1f64" )] - fn _vqtbx3q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - ) -> int8x16_t; + fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; } - _vqtbx3q(a, b, c, d, e.as_signed()) + _vmulx_f64(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx3q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: uint8x16_t, -) -> int8x16_t { +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx3.v16i8" + link_name = "llvm.aarch64.neon.fmulx.v2f64" )] - fn _vqtbx3q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - ) -> int8x16_t; + fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let e: uint8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbx3q(a, b, c, d, e.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + _vmulxq_f64(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { - vqtbx3(a, b.0, b.1, b.2, c) +pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { - let mut b: int8x16x3_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbx3(a, b.0, b.1, b.2, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { - vqtbx3q(a, b.0, b.1, b.2, c) +pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmulxq_f32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { - let mut b: int8x16x3_t = b; - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbx3q(a, b.0, b.1, b.2, c); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmulxq_f32( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), ) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { - transmute(vqtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )) +pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x16x3_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + static_assert!(LANE == 0); + vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { - transmute(vqtbx3q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )) +pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { + static_assert_uimm_bits!(LANE, 1); + vmulx_f64(a, transmute::(simd_extract!(b, LANE as u32))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { - let mut b: uint8x16x3_t = b; - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vqtbx3q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.f64" + )] + fn _vmulxd_f64(a: f64, b: f64) -> f64; + } + _vmulxd_f64(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { - transmute(vqtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )) +#[cfg_attr(test, assert_instr(fmulx))] +pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmulx.f32" + )] + fn _vmulxs_f32(a: f32, b: f32) -> f32; + } + _vmulxs_f32(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x16x3_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { + static_assert!(LANE == 0); + vmulxd_f64(a, simd_extract!(b, LANE as u32)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { - transmute(vqtbx3q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )) +pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { + static_assert_uimm_bits!(LANE, 1); + vmulxd_f64(a, simd_extract!(b, LANE as u32)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { - let mut b: poly8x16x3_t = b; - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vqtbx3q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { + static_assert_uimm_bits!(LANE, 1); + vmulxs_f32(a, simd_extract!(b, LANE as u32)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx4( - a: int8x8_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: uint8x8_t, -) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx4.v8i8" - )] - fn _vqtbx4( - a: int8x8_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: int8x8_t, - ) -> int8x8_t; - } - _vqtbx4(a, b, c, d, e, f.as_signed()) +pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { + static_assert_uimm_bits!(LANE, 2); + vmulxs_f32(a, simd_extract!(b, LANE as u32)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] +#[doc = "Floating-point multiply extended"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(fmulx, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx4( - a: int8x8_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: uint8x8_t, -) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx4.v8i8" - )] - fn _vqtbx4( - a: int8x8_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: int8x8_t, - ) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let e: int8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let f: uint8x8_t = simd_shuffle!(f, f, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqtbx4(a, b, c, d, e, f.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { + static_assert!(LANE == 0); + vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fneg))] +pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t { + simd_neg(a) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fneg))] +pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t { + simd_neg(a) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(neg))] +pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t { + simd_neg(a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: uint8x16_t, -) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx4.v16i8" - )] - fn _vqtbx4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: int8x16_t, - ) -> int8x16_t; - } - _vqtbx4q(a, b, c, d, e, f.as_signed()) +#[cfg_attr(test, assert_instr(neg))] +pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t { + simd_neg(a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -unsafe fn vqtbx4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: uint8x16_t, -) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.tbx4.v16i8" - )] - fn _vqtbx4q( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - e: int8x16_t, - f: int8x16_t, - ) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let d: int8x16_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let e: int8x16_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let f: uint8x16_t = simd_shuffle!(f, f, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqtbx4q(a, b, c, d, e, f.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(neg))] +pub unsafe fn vnegd_s64(a: i64) -> i64 { + a.wrapping_neg() } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { - vqtbx4(a, b.0, b.1, b.2, b.3, c) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 { + let a1: f64 = simd_extract!(a, 0); + let a2: f64 = simd_extract!(a, 1); + a1 + a2 } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { - let mut b: int8x16x4_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbx4(a, b.0, b.1, b.2, b.3, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 { + let a1: f32 = simd_extract!(a, 0); + let a2: f32 = simd_extract!(a, 1); + a1 + a2 } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { - vqtbx4q(a, b.0, b.1, b.2, b.3, c) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { + transmute(vaddvq_u64(transmute(a))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { - let mut b: int8x16x4_t = b; - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqtbx4q(a, b.0, b.1, b.2, b.3, c); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(vaddvq_u64(transmute(a))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { - transmute(vqtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 { + vaddvq_u64(a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x16x4_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v4f32" + )] + fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpaddq_f32(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { - transmute(vqtbx4q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )) +#[cfg_attr(test, assert_instr(faddp))] +pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f64" + )] + fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpaddq_f64(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { - let mut b: uint8x16x4_t = b; - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vqtbx4q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v16i8" + )] + fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpaddq_s8(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { - transmute(vqtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i16" + )] + fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpaddq_s16(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x16x4_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v4i32" + )] + fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpaddq_s32(a, b) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i64" + )] + fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vpaddq_s64(a, b) +} +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { - transmute(vqtbx4q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + transmute(vpaddq_s8(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { - let mut b: poly8x16x4_t = b; - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vqtbx4q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )); +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b))); simd_shuffle!( ret_val, ret_val, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Rotate and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(rax1))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.rax1" - )] - fn _vrax1q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned() +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + transmute(vpaddq_s16(transmute(a), transmute(b))) } -#[doc = "Rotate and exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(rax1))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.rax1" - )] - fn _vrax1q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rbit.v8i8" - )] - fn _vrbit_s8(a: int8x8_t) -> int8x8_t; - } - _vrbit_s8(a) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + transmute(vpaddq_s32(transmute(a), transmute(b))) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rbit.v8i8" - )] - fn _vrbit_s8(a: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vrbit_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rbit.v16i8" - )] - fn _vrbitq_s8(a: int8x16_t) -> int8x16_t; - } - _vrbitq_s8(a) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + transmute(vpaddq_s64(transmute(a), transmute(b))) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] +#[doc = "Add Pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rbit.v16i8" - )] - fn _vrbitq_s8(a: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vrbitq_s8(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(addp))] +pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { - transmute(vrbit_s8(transmute(a))) +pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v2f32" + )] + fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpmaxnm_f32(a, b) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v4f32" + )] + fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpmaxnmq_f32(a, b) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] +#[doc = "Floating-point Maximum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { - transmute(vrbitq_s8(transmute(a))) +pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmp.v2f64" + )] + fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpmaxnmq_f64(a, b) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] +#[doc = "Floating-point maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" + )] + fn _vpmaxnmqd_f64(a: float64x2_t) -> f64; + } + _vpmaxnmqd_f64(a) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] +#[doc = "Floating-point maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { - transmute(vrbit_s8(transmute(a))) +pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" + )] + fn _vpmaxnms_f32(a: float32x2_t) -> f32; + } + _vpmaxnms_f32(a) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v4f32" + )] + fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpmaxq_f32(a, b) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { - transmute(vrbitq_s8(transmute(a))) +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v2f64" + )] + fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpmaxq_f64(a, b) } -#[doc = "Reverse bit order"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(rbit))] -pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v16i8" + )] + fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpmaxq_s8(a, b) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v1f64" + link_name = "llvm.aarch64.neon.smaxp.v8i16" )] - fn _vrecpe_f64(a: float64x1_t) -> float64x1_t; + fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vrecpe_f64(a) + _vpmaxq_s16(a, b) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { +#[cfg_attr(test, assert_instr(smaxp))] +pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f64" + link_name = "llvm.aarch64.neon.smaxp.v4i32" )] - fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t; + fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vrecpeq_f64(a) + _vpmaxq_s32(a, b) } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f64" + link_name = "llvm.aarch64.neon.umaxp.v16i8" )] - fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t; + fn _vpmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrecpeq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vpmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecped_f64(a: f64) -> f64 { +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.f64" + link_name = "llvm.aarch64.neon.umaxp.v8i16" )] - fn _vrecped_f64(a: f64) -> f64; + fn _vpmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vrecped_f64(a) + _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpes_f32(a: f32) -> f32 { +#[cfg_attr(test, assert_instr(umaxp))] +pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.f32" + link_name = "llvm.aarch64.neon.umaxp.v4i32" )] - fn _vrecpes_f32(a: f32) -> f32; + fn _vpmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vrecpes_f32(a) + _vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"] +#[doc = "Floating-point maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v1f64" + link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64" )] - fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + fn _vpmaxqd_f64(a: float64x2_t) -> f64; } - _vrecps_f64(a, b) + _vpmaxqd_f64(a) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] +#[doc = "Floating-point maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { +#[cfg_attr(test, assert_instr(fmaxp))] +pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v2f64" + link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32" )] - fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vpmaxs_f32(a: float32x2_t) -> f32; } - _vrecpsq_f64(a, b) + _vpmaxs_f32(a) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] +#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { +pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v2f64" + link_name = "llvm.aarch64.neon.fminnmp.v2f32" )] - fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vrecpsq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vpminnm_f32(a, b) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"] +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] +#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { +pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.f64" + link_name = "llvm.aarch64.neon.fminnmp.v4f32" )] - fn _vrecpsd_f64(a: f64, b: f64) -> f64; + fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - _vrecpsd_f64(a, b) + _vpminnmq_f32(a, b) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"] +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecps))] +#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { +pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.f32" + link_name = "llvm.aarch64.neon.fminnmp.v2f64" )] - fn _vrecpss_f32(a: f32, b: f32) -> f32; + fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; } - _vrecpss_f32(a, b) + _vpminnmq_f64(a, b) } -#[doc = "Floating-point reciprocal exponent"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"] +#[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpx))] +#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpxd_f64(a: f64) -> f64 { +pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpx.f64" + link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" )] - fn _vrecpxd_f64(a: f64) -> f64; + fn _vpminnmqd_f64(a: float64x2_t) -> f64; } - _vrecpxd_f64(a) + _vpminnmqd_f64(a) } -#[doc = "Floating-point reciprocal exponent"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"] +#[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frecpx))] +#[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrecpxs_f32(a: f32) -> f32 { +pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpx.f32" + link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" )] - fn _vrecpxs_f32(a: f32) -> f32; + fn _vpminnms_f32(a: float32x2_t) -> f32; } - _vrecpxs_f32(a) + _vpminnms_f32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v4f32" + )] + fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vpminq_f32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f64" + )] + fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + _vpminq_f64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v16i8" + )] + fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpminq_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v8i16" + )] + fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpminq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(sminp))] +pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v4i32" + )] + fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpminq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v16i8" + )] + fn _vpminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vpminq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i16" + )] + fn _vpminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(test, assert_instr(uminp))] +pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i32" + )] + fn _vpminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f64.v2f64" + )] + fn _vpminqd_f64(a: float64x2_t) -> f64; + } + _vpminqd_f64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(test, assert_instr(fminp))] +pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v2f32" + )] + fn _vpmins_f32(a: float32x2_t) -> f32; + } + _vpmins_f32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { - transmute(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v1i64" + )] + fn _vqabs_s64(a: int64x1_t) -> int64x1_t; + } + _vqabs_s64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v2i64" + )] + fn _vqabsq_s64(a: int64x2_t) -> int64x2_t; + } + _vqabsq_s64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { - transmute(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsb_s8(a: i8) -> i8 { + simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsh_s16(a: i16) -> i16 { + simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { - transmute(a) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabss_s32(a: i32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.i32" + )] + fn _vqabss_s32(a: i32) -> i32; + } + _vqabss_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] +#[doc = "Signed saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] +pub unsafe fn vqabsd_s64(a: i64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.i64" + )] + fn _vqabsd_s64(a: i64) -> i64; + } + _vqabsd_s64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 { + let a: int8x8_t = vdup_n_s8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqadd_s8(a, b), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqadd_s16(a, b), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 { + let a: uint8x8_t = vdup_n_u8(a); + let b: uint8x8_t = vdup_n_u8(b); + simd_extract!(vqadd_u8(a, b), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { - transmute(a) +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 { + let a: uint16x4_t = vdup_n_u16(a); + let b: uint16x4_t = vdup_n_u16(b); + simd_extract!(vqadd_u16(a, b), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.i32" + )] + fn _vqadds_s32(a: i32, b: i32) -> i32; + } + _vqadds_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { - transmute(a) +#[cfg_attr(test, assert_instr(sqadd))] +pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.i64" + )] + fn _vqaddd_s64(a: i64, b: i64) -> i64; + } + _vqaddd_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.i32" + )] + fn _vqadds_u32(a: i32, b: i32) -> i32; + } + _vqadds_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(uqadd))] +pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.i64" + )] + fn _vqaddd_u64(a: i64, b: i64) -> i64; + } + _vqaddd_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmlal_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vqdmlal_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vqdmlal_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmlal_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vqaddq_s32(a, vqdmull_high_n_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + vqaddq_s32(a, vqdmull_high_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vqaddq_s64(a, vqdmull_high_n_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { - transmute(a) +pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + vqaddq_s64(a, vqdmull_high_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, N = 2))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vqdmlal_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqaddq_s32(a, vqdmull_laneq_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { - transmute(a) +pub unsafe fn vqdmlal_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s64(a, vqdmull_laneq_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { - transmute(a) +pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { + static_assert_uimm_bits!(LANE, 3); + vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { + static_assert_uimm_bits!(LANE, 1); + vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { - transmute(a) +pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { + static_assert_uimm_bits!(LANE, 2); + vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { + let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); + vqadds_s32(a, simd_extract!(x, 0)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlal))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { - transmute(a) +pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { + let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c)); + x as i64 } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmlsl_high_lane_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { - transmute(a) +pub unsafe fn vqdmlsl_high_laneq_s16( + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmlsl_high_lane_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vqdmlsl_high_laneq_s32( + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + vqsubq_s32(a, vqdmull_high_n_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + vqsubq_s32(a, vqdmull_high_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + vqsubq_s64(a, vqdmull_high_n_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + vqsubq_s64(a, vqdmull_high_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmlsl_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + vqsubq_s32(a, vqdmull_laneq_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vqdmlsl_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s64(a, vqdmull_laneq_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { + static_assert_uimm_bits!(LANE, 3); + vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { + static_assert_uimm_bits!(LANE, 1); + vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { + static_assert_uimm_bits!(LANE, 2); + vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { + let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); + vqsubs_s32(a, simd_extract!(x, 0)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmlsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { + let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c)); + x as i64 } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { + static_assert_uimm_bits!(N, 2); + let b: i16 = simd_extract!(b, N as u32); + vqdmulhh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { + static_assert_uimm_bits!(N, 3); + let b: i16 = simd_extract!(b, N as u32); + vqdmulhh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqdmulh_s16(a, b), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { + let a: int32x2_t = vdup_n_s32(a); + let b: int32x2_t = vdup_n_s32(b); + simd_extract!(vqdmulh_s32(a, b), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { + static_assert_uimm_bits!(N, 1); + let b: i32 = simd_extract!(b, N as u32); + vqdmulhs_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmulh, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + let b: i32 = simd_extract!(b, N as u32); + vqdmulhs_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2, N = 4))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = vdup_n_s16(b); + vqdmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = vdup_n_s32(b); + vqdmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + vqdmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); + vqdmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(N, 3); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(N, 2); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { + static_assert_uimm_bits!(N, 2); + let b: i16 = simd_extract!(b, N as u32); + vqdmullh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { + static_assert_uimm_bits!(N, 2); + let b: i32 = simd_extract!(b, N as u32); + vqdmulls_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 4))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { + static_assert_uimm_bits!(N, 3); + let b: i16 = simd_extract!(b, N as u32); + vqdmullh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqdmull_s16(a, b), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { + static_assert_uimm_bits!(N, 1); + let b: i32 = simd_extract!(b, N as u32); + vqdmulls_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqdmull))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulls.scalar" + )] + fn _vqdmulls_s32(a: i32, b: i32) -> i64; + } + _vqdmulls_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + simd_shuffle!( + a, + vqmovn_s16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + simd_shuffle!( + a, + vqmovn_u16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqmovnd_s64(a: i64) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64" + )] + fn _vqmovnd_s64(a: i64) -> i32; + } + _vqmovnd_s64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqmovnd_u64(a: u64) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64" + )] + fn _vqmovnd_u64(a: i64) -> i32; + } + _vqmovnd_u64(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqmovnh_s16(a: i16) -> i8 { + simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqmovns_s32(a: i32) -> i16 { + simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqmovnh_u16(a: u16) -> u8 { + simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] +#[doc = "Saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqmovns_u32(a: u32) -> u16 { + simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + simd_shuffle!( + a, + vqmovun_s16(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { - transmute(a) +pub unsafe fn vqmovunh_s16(a: i16) -> u8 { + simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqmovuns_s32(a: i32) -> u16 { + simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqxtun))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { - transmute(a) +pub unsafe fn vqmovund_s64(a: i64) -> u32 { + simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v1i64" + )] + fn _vqneg_s64(a: int64x1_t) -> int64x1_t; + } + _vqneg_s64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { - transmute(a) +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v2i64" + )] + fn _vqnegq_s64(a: int64x2_t) -> int64x2_t; + } + _vqnegq_s64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegb_s8(a: i8) -> i8 { + simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegh_s16(a: i16) -> i16 { + simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegs_s32(a: i32) -> i32 { + simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { - transmute(a) +#[cfg_attr(test, assert_instr(sqneg))] +pub unsafe fn vqnegd_s64(a: i64) -> i64 { + simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { - transmute(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlah_s16(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlah_s32(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { - transmute(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlah_s16(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlah_s32(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { - transmute(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlahq_s16(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlahq_s32(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { - transmute(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlahq_s16(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlahq_s32(a, b, c) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v2f32" + link_name = "llvm.aarch64.neon.sqrdmlah.v4i16" )] - fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t; + fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; } - _vrnd32x_f32(a) + _vqrdmlah_s16(a, b, c) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v2f32" + link_name = "llvm.aarch64.neon.sqrdmlah.v8i16" )] - fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t; + fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrnd32x_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vqrdmlahq_s16(a, b, c) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v4f32" + link_name = "llvm.aarch64.neon.sqrdmlah.v2i32" )] - fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t; + fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; } - _vrnd32xq_f32(a) + _vqrdmlah_s32(a, b, c) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v4f32" + link_name = "llvm.aarch64.neon.sqrdmlah.v4i32" )] - fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t; + fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrnd32xq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vqrdmlahq_s32(a, b, c) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v2f64" - )] - fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t; - } - _vrnd32xq_f64(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32x.v2f64" - )] - fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrnd32xq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] -pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint32x.f64" - )] - fn _vrnd32x_f64(a: f64) -> f64; - } - transmute(_vrnd32x_f64(simd_extract!(a, 0))) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v2f32" - )] - fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd32z_f32(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v2f32" - )] - fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrnd32z_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + let c: int16x4_t = vdup_n_s16(c); + simd_extract!(vqrdmlah_s16(a, b, c), 0) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] +#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v4f32" - )] - fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t; - } - _vrnd32zq_f32(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 { + let a: int32x2_t = vdup_n_s32(a); + let b: int32x2_t = vdup_n_s32(b); + let c: int32x2_t = vdup_n_s32(c); + simd_extract!(vqrdmlah_s32(a, b, c), 0) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v4f32" - )] - fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrnd32zq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlsh_s16(a, b, c) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v2f64" - )] - fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t; - } - _vrnd32zq_f64(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlsh_s32(a, b, c) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint32z.v2f64" - )] - fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrnd32zq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlsh_s16(a, b, c) } -#[doc = "Floating-point round to 32-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] -pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint32z.f64" - )] - fn _vrnd32z_f64(a: f64) -> f64; - } - transmute(_vrnd32z_f64(simd_extract!(a, 0))) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vqrdmlsh_s32(a, b, c) +} +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlshq_s16(a, b, c) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v2f32" - )] - fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd64x_f32(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlshq_s32(a, b, c) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v2f32" - )] - fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrnd64x_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let c: int16x8_t = simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmlshq_s16(a, b, c) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v4f32" - )] - fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t; - } - _vrnd64xq_f32(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmlshq_s32(a, b, c) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v4f32" + link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16" )] - fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t; + fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrnd64xq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vqrdmlsh_s16(a, b, c) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v2f64" + link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16" )] - fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t; + fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; } - _vrnd64xq_f64(a) + _vqrdmlshq_s16(a, b, c) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64x.v2f64" + link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32" )] - fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t; + fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrnd64xq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vqrdmlsh_s32(a, b, c) } -#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] -pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint64x.f64" + link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32" )] - fn _vrnd64x_f64(a: f64) -> f64; + fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - transmute(_vrnd64x_f64(simd_extract!(a, 0))) + _vqrdmlshq_s32(a, b, c) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v2f32" - )] - fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd64z_f32(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v2f32" - )] - fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrnd64z_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v4f32" - )] - fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t; - } - _vrnd64zq_f32(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v4f32" - )] - fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrnd64zq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] +#[rustc_legacy_const_generics(3)] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v2f64" - )] - fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t; - } - _vrnd64zq_f64(a) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + let c: int16x4_t = vdup_n_s16(c); + simd_extract!(vqrdmlsh_s16(a, b, c), 0) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] +#[doc = "Signed saturating rounding doubling multiply subtract returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frint64z.v2f64" - )] - fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrnd64zq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "rdm")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +#[stable(feature = "rdm_intrinsics", since = "1.62.0")] +pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { + let a: int32x2_t = vdup_n_s32(a); + let b: int32x2_t = vdup_n_s32(b); + let c: int32x2_t = vdup_n_s32(c); + simd_extract!(vqrdmlsh_s32(a, b, c), 0) } -#[doc = "Floating-point round to 64-bit integer toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,frintts")] -#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] -pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.frint64z.f64" - )] - fn _vrnd64z_f64(a: f64) -> f64; - } - transmute(_vrnd64z_f64(simd_extract!(a, 0))) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { + static_assert_uimm_bits!(LANE, 2); + vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v2f32" - )] - fn _vrnd_f32(a: float32x2_t) -> float32x2_t; - } - _vrnd_f32(a) +pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { + static_assert_uimm_bits!(LANE, 3); + vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v2f32" - )] - fn _vrnd_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrnd_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { + static_assert_uimm_bits!(LANE, 1); + vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v4f32" - )] - fn _vrndq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndq_f32(a) +pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { + static_assert_uimm_bits!(LANE, 2); + vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v4f32" - )] - fn _vrndq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrndq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { + simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v1f64" - )] - fn _vrnd_f64(a: float64x1_t) -> float64x1_t; - } - _vrnd_f64(a) +pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { + simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v2f64" - )] - fn _vrndq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndq_f64(a) +pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 { + let a: int8x8_t = vdup_n_s8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqrshl_s8(a, b), 0) } -#[doc = "Floating-point round to integral, toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintz))] -pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.trunc.v2f64" - )] - fn _vrndq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrndq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqrshl_s16(a, b), 0) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v2f32" - )] - fn _vrnda_f32(a: float32x2_t) -> float32x2_t; - } - _vrnda_f32(a) +pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 { + let a: uint8x8_t = vdup_n_u8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqrshl_u8(a, b), 0) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v2f32" - )] - fn _vrnda_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrnda_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { + let a: uint16x4_t = vdup_n_u16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqrshl_u16(a, b), 0) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { +pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v4f32" + link_name = "llvm.aarch64.neon.sqrshl.i64" )] - fn _vrndaq_f32(a: float32x4_t) -> float32x4_t; + fn _vqrshld_s64(a: i64, b: i64) -> i64; } - _vrndaq_f32(a) + _vqrshld_s64(a, b) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { +pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v4f32" + link_name = "llvm.aarch64.neon.sqrshl.i32" )] - fn _vrndaq_f32(a: float32x4_t) -> float32x4_t; + fn _vqrshls_s32(a: i32, b: i32) -> i32; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrndaq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vqrshls_s32(a, b) } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { +pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v1f64" + link_name = "llvm.aarch64.neon.uqrshl.i32" )] - fn _vrnda_f64(a: float64x1_t) -> float64x1_t; + fn _vqrshls_u32(a: i32, b: i32) -> i32; } - _vrnda_f64(a) + _vqrshls_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { +pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v2f64" + link_name = "llvm.aarch64.neon.uqrshl.i64" )] - fn _vrndaq_f64(a: float64x2_t) -> float64x2_t; + fn _vqrshld_u64(a: i64, b: i64) -> i64; } - _vrndaq_f64(a) + _vqrshld_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Floating-point round to integral, to nearest with ties to away"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinta))] -pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.round.v2f64" - )] - fn _vrndaq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrndaq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqrshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v2f32" - )] - fn _vrndi_f32(a: float32x2_t) -> float32x2_t; - } - _vrndi_f32(a) +pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v2f32" - )] - fn _vrndi_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrndi_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v4f32" - )] - fn _vrndiq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndiq_f32(a) +pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqrshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v4f32" - )] - fn _vrndiq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrndiq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"] +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v1f64" - )] - fn _vrndi_f64(a: float64x1_t) -> float64x1_t; - } - _vrndi_f64(a) +pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v2f64" - )] - fn _vrndiq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndiq_f64(a) +pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { + static_assert!(N >= 1 && N <= 32); + let a: uint64x2_t = vdupq_n_u64(a); + simd_extract!(vqrshrn_n_u64::(a), 0) } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v2f64" - )] - fn _vrndiq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrndiq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { + static_assert!(N >= 1 && N <= 8); + let a: uint16x8_t = vdupq_n_u16(a); + simd_extract!(vqrshrn_n_u16::(a), 0) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v2f32" - )] - fn _vrndm_f32(a: float32x2_t) -> float32x2_t; - } - _vrndm_f32(a) +pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { + static_assert!(N >= 1 && N <= 16); + let a: uint32x4_t = vdupq_n_u32(a); + simd_extract!(vqrshrn_n_u32::(a), 0) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v2f32" - )] - fn _vrndm_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrndm_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { + static_assert!(N >= 1 && N <= 8); + let a: int16x8_t = vdupq_n_s16(a); + simd_extract!(vqrshrn_n_s16::(a), 0) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v4f32" - )] - fn _vrndmq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndmq_f32(a) +pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { + static_assert!(N >= 1 && N <= 16); + let a: int32x4_t = vdupq_n_s32(a); + simd_extract!(vqrshrn_n_s32::(a), 0) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v4f32" - )] - fn _vrndmq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrndmq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { + static_assert!(N >= 1 && N <= 32); + let a: int64x2_t = vdupq_n_s64(a); + simd_extract!(vqrshrn_n_s64::(a), 0) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"] +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v1f64" - )] - fn _vrndm_f64(a: float64x1_t) -> float64x1_t; - } - _vrndm_f64(a) +pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vqrshrun_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v2f64" - )] - fn _vrndmq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndmq_f64(a) +pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point round to integral, toward minus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintm))] -pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.floor.v2f64" - )] - fn _vrndmq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrndmq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"] +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintn))] -pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v1f64" - )] - fn _vrndn_f64(a: float64x1_t) -> float64x1_t; - } - _vrndn_f64(a) +pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { + static_assert!(N >= 1 && N <= 32); + let a: int64x2_t = vdupq_n_s64(a); + simd_extract!(vqrshrun_n_s64::(a), 0) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintn))] -pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v2f64" - )] - fn _vrndnq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndnq_f64(a) +pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { + static_assert!(N >= 1 && N <= 8); + let a: int16x8_t = vdupq_n_s16(a); + simd_extract!(vqrshrun_n_s16::(a), 0) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintn))] -pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v2f64" - )] - fn _vrndnq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrndnq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { + static_assert!(N >= 1 && N <= 16); + let a: int32x4_t = vdupq_n_s32(a); + simd_extract!(vqrshrun_n_s32::(a), 0) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintn))] -pub unsafe fn vrndns_f32(a: f32) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.roundeven.f32" - )] - fn _vrndns_f32(a: f32) -> f32; - } - _vrndns_f32(a) +pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(vqshl_n_s8::(vdup_n_s8(a)), 0) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v2f32" - )] - fn _vrndp_f32(a: float32x2_t) -> float32x2_t; - } - _vrndp_f32(a) +pub unsafe fn vqshld_n_s64(a: i64) -> i64 { + static_assert_uimm_bits!(N, 6); + simd_extract!(vqshl_n_s64::(vdup_n_s64(a)), 0) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v2f32" - )] - fn _vrndp_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrndp_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { + static_assert_uimm_bits!(N, 4); + simd_extract!(vqshl_n_s16::(vdup_n_s16(a)), 0) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v4f32" - )] - fn _vrndpq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndpq_f32(a) +pub unsafe fn vqshls_n_s32(a: i32) -> i32 { + static_assert_uimm_bits!(N, 5); + simd_extract!(vqshl_n_s32::(vdup_n_s32(a)), 0) +} +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(vqshl_n_u8::(vdup_n_u8(a)), 0) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v4f32" - )] - fn _vrndpq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrndpq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqshld_n_u64(a: u64) -> u64 { + static_assert_uimm_bits!(N, 6); + simd_extract!(vqshl_n_u64::(vdup_n_u64(a)), 0) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v1f64" - )] - fn _vrndp_f64(a: float64x1_t) -> float64x1_t; - } - _vrndp_f64(a) +pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { + static_assert_uimm_bits!(N, 4); + simd_extract!(vqshl_n_u16::(vdup_n_u16(a)), 0) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v2f64" - )] - fn _vrndpq_f64(a: float64x2_t) -> float64x2_t; - } - _vrndpq_f64(a) +pub unsafe fn vqshls_n_u32(a: u32) -> u32 { + static_assert_uimm_bits!(N, 5); + simd_extract!(vqshl_n_u32::(vdup_n_u32(a)), 0) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ceil.v2f64" - )] - fn _vrndpq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrndpq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 { + let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b)); + simd_extract!(c, 0) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v2f32" - )] - fn _vrndx_f32(a: float32x2_t) -> float32x2_t; - } - _vrndx_f32(a) +pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 { + let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b)); + simd_extract!(c, 0) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v2f32" - )] - fn _vrndx_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrndx_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 { + let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b)); + simd_extract!(c, 0) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v4f32" - )] - fn _vrndxq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndxq_f32(a) +pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 { + let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b)); + simd_extract!(c, 0) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v4f32" - )] - fn _vrndxq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrndxq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 { + let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b)); + simd_extract!(c, 0) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v1f64" - )] - fn _vrndx_f64(a: float64x1_t) -> float64x1_t; - } - _vrndx_f64(a) +pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { + let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b)); + simd_extract!(c, 0) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { +pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v2f64" + link_name = "llvm.aarch64.neon.sqshl.i64" )] - fn _vrndxq_f64(a: float64x2_t) -> float64x2_t; + fn _vqshld_s64(a: i64, b: i64) -> i64; } - _vrndxq_f64(a) + _vqshld_s64(a, b) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { +pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.rint.v2f64" + link_name = "llvm.aarch64.neon.uqshl.i64" )] - fn _vrndxq_f64(a: float64x2_t) -> float64x2_t; + fn _vqshld_u64(a: i64, b: i64) -> i64; } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrndxq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vqshld_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshl))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.i64" - )] - fn _vrshld_s64(a: i64, b: i64) -> i64; - } - _vrshld_s64(a, b) +pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { + static_assert_uimm_bits!(N, 3); + simd_extract!(vqshlu_n_s8::(vdup_n_s8(a)), 0) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshl))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.i64" - )] - fn _vrshld_u64(a: i64, b: i64) -> i64; - } - _vrshld_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { + static_assert_uimm_bits!(N, 6); + simd_extract!(vqshlu_n_s64::(vdup_n_s64(a)), 0) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshr, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - vrshld_s64(a, -N as i64) +pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { + static_assert_uimm_bits!(N, 4); + simd_extract!(vqshlu_n_s16::(vdup_n_s16(a)), 0) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshr, N = 2))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - vrshld_u64(a, -N as i64) +pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { + static_assert_uimm_bits!(N, 5); + simd_extract!(vqshlu_n_s32::(vdup_n_s32(a)), 0) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { +pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); simd_shuffle!( a, - vrshrn_n_s16::(b), + vqshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { +pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]) +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x16_t = simd_shuffle!( - a, - vrshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); simd_shuffle!( - ret_val, - ret_val, + a, + vqshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { +pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) + simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { +pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]) + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.i32" + )] + fn _vqshrnd_n_s64(a: i64, n: i32) -> i32; + } + _vqshrnd_n_s64(a, N) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { +pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.i32" + )] + fn _vqshrnd_n_u64(a: i64, n: i32) -> i32; + } + _vqshrnd_n_u64(a.as_signed(), N).as_unsigned() } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { +pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vrshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + simd_extract!(vqshrn_n_s16::(vdupq_n_s16(a)), 0) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { + static_assert!(N >= 1 && N <= 16); + simd_extract!(vqshrn_n_s32::(vdupq_n_s32(a)), 0) +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { + static_assert!(N >= 1 && N <= 8); + simd_extract!(vqshrn_n_u16::(vdupq_n_u16(a)), 0) +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { + static_assert!(N >= 1 && N <= 16); + simd_extract!(vqshrn_n_u32::(vdupq_n_u32(a)), 0) +} +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { +pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vrshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); simd_shuffle!( - ret_val, - ret_val, + a, + vqshrun_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { +pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) + simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] +#[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { +pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]) + simd_extract!(vqshrun_n_s64::(vdupq_n_s64(a)), 0) +} +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { + static_assert!(N >= 1 && N <= 8); + simd_extract!(vqshrun_n_s16::(vdupq_n_s16(a)), 0) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { + static_assert!(N >= 1 && N <= 16); + simd_extract!(vqshrun_n_s32::(vdupq_n_s32(a)), 0) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v1f64" - )] - fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t; - } - _vrsqrte_f64(a) +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 { + let a: int8x8_t = vdup_n_s8(a); + let b: int8x8_t = vdup_n_s8(b); + simd_extract!(vqsub_s8(a, b), 0) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f64" - )] - fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; - } - _vrsqrteq_f64(a) +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 { + let a: int16x4_t = vdup_n_s16(a); + let b: int16x4_t = vdup_n_s16(b); + simd_extract!(vqsub_s16(a, b), 0) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f64" - )] - fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; - } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = _vrsqrteq_f64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 { + let a: uint8x8_t = vdup_n_u8(a); + let b: uint8x8_t = vdup_n_u8(b); + simd_extract!(vqsub_u8(a, b), 0) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrted_f64(a: f64) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.f64" - )] - fn _vrsqrted_f64(a: f64) -> f64; - } - _vrsqrted_f64(a) +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 { + let a: uint16x4_t = vdup_n_u16(a); + let b: uint16x4_t = vdup_n_u16(b); + simd_extract!(vqsub_u16(a, b), 0) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.f32" + link_name = "llvm.aarch64.neon.sqsub.i32" )] - fn _vrsqrtes_f32(a: f32) -> f32; + fn _vqsubs_s32(a: i32, b: i32) -> i32; } - _vrsqrtes_f32(a) + _vqsubs_s32(a, b) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { +#[cfg_attr(test, assert_instr(sqsub))] +pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v1f64" + link_name = "llvm.aarch64.neon.sqsub.i64" )] - fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; + fn _vqsubd_s64(a: i64, b: i64) -> i64; } - _vrsqrts_f64(a, b) + _vqsubd_s64(a, b) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v2f64" + link_name = "llvm.aarch64.neon.uqsub.i32" )] - fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vqsubs_u32(a: i32, b: i32) -> i32; } - _vrsqrtsq_f64(a, b) + _vqsubs_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { +#[cfg_attr(test, assert_instr(uqsub))] +pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v2f64" + link_name = "llvm.aarch64.neon.uqsub.i64" )] - fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + fn _vqsubd_u64(a: i64, b: i64) -> i64; } - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = _vrsqrtsq_f64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vqsubd_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { +unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.f64" + link_name = "llvm.aarch64.neon.tbl1.v8i8" )] - fn _vrsqrtsd_f64(a: f64, b: f64) -> f64; + fn _vqtbl1(a: int8x16_t, b: int8x8_t) -> int8x8_t; } - _vrsqrtsd_f64(a, b) + _vqtbl1(a, b.as_signed()) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrts))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { +unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.f32" + link_name = "llvm.aarch64.neon.tbl1.v16i8" )] - fn _vrsqrtss_f32(a: f32, b: f32) -> f32; + fn _vqtbl1q(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - _vrsqrtss_f32(a, b) + _vqtbl1q(a, b.as_signed()) } -#[doc = "Signed rounding shift right and accumulate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshr, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - let b: i64 = vrshrd_n_s64::(b); - a.wrapping_add(b) +pub unsafe fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { + vqtbl1(a, b) } -#[doc = "Unsigned rounding shift right and accumulate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshr, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - let b: u64 = vrshrd_n_u64::(b); - a.wrapping_add(b) +pub unsafe fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + vqtbl1q(a, b) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let x: int8x8_t = vrsubhn_s16(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { + let x = transmute(vqtbl1(transmute(a), b)); + x } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let x: int8x8_t = vrsubhn_s16(b, c); - let ret_val: int8x16_t = - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let x = transmute(vqtbl1q(transmute(a), b)); + x } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let x: int16x4_t = vrsubhn_s32(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { + let x = transmute(vqtbl1(transmute(a), b)); + x } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let x: int16x4_t = vrsubhn_s32(b, c); - let ret_val: int16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { + let x = transmute(vqtbl1q(transmute(a), b)); + x } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let x: int32x2_t = vrsubhn_s64(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3]) +unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl2.v8i8" + )] + fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; + } + _vqtbl2(a, b, c.as_signed()) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); - let x: int32x2_t = vrsubhn_s64(b, c); - let ret_val: int32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl2.v16i8" + )] + fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _vqtbl2q(a, b, c.as_signed()) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let x: uint8x8_t = vrsubhn_u16(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { + vqtbl2(a.0, a.1, b) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let x: uint8x8_t = vrsubhn_u16(b, c); - let ret_val: uint8x16_t = - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { + vqtbl2q(a.0, a.1, b) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let x: uint16x4_t = vrsubhn_u32(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let x: uint16x4_t = vrsubhn_u32(b, c); - let ret_val: uint16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let x: uint32x2_t = vrsubhn_u64(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3]) +pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { + transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(rsubhn2))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let x: uint32x2_t = vrsubhn_u64(b, c); - let ret_val: uint32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "SHA512 hash update part 2"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512h2))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512h2" - )] - fn _vsha512h2q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() -} -#[doc = "SHA512 hash update part 2"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512h2))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512h2" - )] - fn _vsha512h2q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = - _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "SHA512 hash update part 1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512h))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512h" - )] - fn _vsha512hq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { + transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } -#[doc = "SHA512 hash update part 1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512h))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512h" - )] - fn _vsha512hq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = - _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x2_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "SHA512 schedule update 0"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512su0))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512su0" + link_name = "llvm.aarch64.neon.tbl3.v8i8" )] - fn _vsha512su0q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; } - _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vqtbl3(a, b, c, d.as_signed()) } -#[doc = "SHA512 schedule update 0"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512su0))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512su0" + link_name = "llvm.aarch64.neon.tbl3.v16i8" )] - fn _vsha512su0q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vqtbl3q(a, b, c, d.as_signed()) } -#[doc = "SHA512 schedule update 1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512su1))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512su1" - )] - fn _vsha512su1q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { + vqtbl3(a.0, a.1, a.2, b) } -#[doc = "SHA512 schedule update 1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(sha512su1))] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha512su1" - )] - fn _vsha512su1q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = - _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { + vqtbl3q(a.0, a.1, a.2, b) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshl))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { - transmute(vshl_s64(transmute(a), transmute(b))) +pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushl))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { - transmute(vshl_u64(transmute(a), transmute(b))) +pub unsafe fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { - static_assert!(N >= 0 && N <= 8); - let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vshll_n_s8::(b) +pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { + transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { - static_assert!(N >= 0 && N <= 8); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = vshll_n_s8::(b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 16); - let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vshll_n_s16::(b) +pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 16); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let ret_val: int32x4_t = vshll_n_s16::(b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 32); - let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); - vshll_n_s32::(b) +pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { + transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sshll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 32); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let ret_val: int64x2_t = vshll_n_s32::(b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x3_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { - static_assert!(N >= 0 && N <= 8); - let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - vshll_n_u8::(b) +unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl4.v8i8" + )] + fn _vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) + -> int8x8_t; + } + _vqtbl4(a, b, c, d, e.as_signed()) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { - static_assert!(N >= 0 && N <= 8); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = vshll_n_u8::(b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +unsafe fn vqtbl4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: uint8x16_t, +) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbl4.v16i8" + )] + fn _vqtbl4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + ) -> int8x16_t; + } + _vqtbl4q(a, b, c, d, e.as_signed()) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 16); - let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - vshll_n_u16::(b) +pub unsafe fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { + vqtbl4(a.0, a.1, a.2, a.3, b) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 16); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let ret_val: uint32x4_t = vshll_n_u16::(b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { + vqtbl4q(a.0, a.1, a.2, a.3, b) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 32); - let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - vshll_n_u32::(b) +pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(ushll2, N = 2))] -#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 32); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let ret_val: uint64x2_t = vshll_n_u32::(b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { + transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x16_t = simd_shuffle!( - a, - vshrn_n_s16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +pub unsafe fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]) +pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { + transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x4_t = a; + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_shuffle!( - a, - vshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx1.v8i8" + )] + fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; + } + _vqtbx1(a, b, c.as_signed()) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - vshrn_n_u16::(b), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx1.v16i8" + )] + fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + _vqtbx1q(a, b, c.as_signed()) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + vqtbx1(a, b, c) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + vqtbx1q(a, b, c) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]) +pub unsafe fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { + let x = transmute(vqtbx1(transmute(a), transmute(b), c)); + x } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(shrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); + x } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v8i8" - )] - fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; - } - _vsli_n_s8(a, b, N) +pub unsafe fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { + let x = transmute(vqtbx1(transmute(a), transmute(b), c)); + x } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v8i8" - )] - fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vsli_n_s8(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { + let x = transmute(vqtbx1q(transmute(a), transmute(b), c)); + x } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); +unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v16i8" + link_name = "llvm.aarch64.neon.tbx2.v8i8" )] - fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; + fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; } - _vsliq_n_s8(a, b, N) + _vqtbx2(a, b, c, d.as_signed()) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); +unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v16i8" + link_name = "llvm.aarch64.neon.tbx2.v16i8" )] - fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; + fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vsliq_n_s8(a, b, N); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + _vqtbx2q(a, b, c, d.as_signed()) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v4i16" - )] - fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; - } - _vsli_n_s16(a, b, N) +pub unsafe fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { + vqtbx2(a, b.0, b.1, c) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v4i16" - )] - fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vsli_n_s16(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { + vqtbx2q(a, b.0, b.1, c) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v8i16" - )] - fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; - } - _vsliq_n_s16(a, b, N) +pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v8i16" - )] - fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vsliq_n_s16(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x2_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v2i32" - )] - fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; - } - _vsli_n_s32(a, b, N) +pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { + transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v2i32" - )] - fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vsli_n_s32(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x2_t = b; + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v4i32" - )] - fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; - } - _vsliq_n_s32(a, b, N) +pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v4i32" - )] - fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vsliq_n_s32(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x2_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { + transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 0 && N <= 63); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v1i64" - )] - fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; - } - _vsli_n_s64(a, b, N) +pub unsafe fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x2_t = b; + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 63); +unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v2i64" + link_name = "llvm.aarch64.neon.tbx3.v8i8" )] - fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) -> int8x8_t; } - _vsliq_n_s64(a, b, N) + _vqtbx3(a, b, c, d, e.as_signed()) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 63); +unsafe fn vqtbx3q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: uint8x16_t, +) -> int8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v2i64" + link_name = "llvm.aarch64.neon.tbx3.v16i8" )] - fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + fn _vqtbx3q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + ) -> int8x16_t; } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vsliq_n_s64(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vqtbx3q(a, b, c, d, e.as_signed()) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - transmute(vsli_n_s8::(transmute(a), transmute(b))) +pub unsafe fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { + vqtbx3(a, b.0, b.1, b.2, c) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = transmute(vsli_n_s8::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { + vqtbx3q(a, b.0, b.1, b.2, c) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - transmute(vsliq_n_s8::(transmute(a), transmute(b))) +pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = transmute(vsliq_n_s8::(transmute(a), transmute(b))); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x3_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - transmute(vsli_n_s16::(transmute(a), transmute(b))) +pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { + transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = transmute(vsli_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x3_t = b; + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - transmute(vsliq_n_s16::(transmute(a), transmute(b))) +pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = transmute(vsliq_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x3_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 0 && N <= 31); - transmute(vsli_n_s32::(transmute(a), transmute(b))) +pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { + transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 0 && N <= 31); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = transmute(vsli_n_s32::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 31); - transmute(vsliq_n_s32::(transmute(a), transmute(b))) +pub unsafe fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x3_t = b; + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 31); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = transmute(vsliq_n_s32::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +unsafe fn vqtbx4( + a: int8x8_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: uint8x8_t, +) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx4.v8i8" + )] + fn _vqtbx4( + a: int8x8_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: int8x8_t, + ) -> int8x8_t; + } + _vqtbx4(a, b, c, d, e, f.as_signed()) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_s64::(transmute(a), transmute(b))) +unsafe fn vqtbx4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: uint8x16_t, +) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.tbx4.v16i8" + )] + fn _vqtbx4q( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: int8x16_t, + f: int8x16_t, + ) -> int8x16_t; + } + _vqtbx4q(a, b, c, d, e, f.as_signed()) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsliq_n_s64::(transmute(a), transmute(b))) +pub unsafe fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { + vqtbx4(a, b.0, b.1, b.2, b.3, c) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 63); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = transmute(vsliq_n_s64::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { + vqtbx4q(a, b.0, b.1, b.2, b.3, c) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - transmute(vsli_n_s8::(transmute(a), transmute(b))) +pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = transmute(vsli_n_s8::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x4_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - transmute(vsliq_n_s8::(transmute(a), transmute(b))) +pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { + transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = transmute(vsliq_n_s8::(transmute(a), transmute(b))); +pub unsafe fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x4_t = b; + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 4); - transmute(vsli_n_s16::(transmute(a), transmute(b))) +pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 4); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = transmute(vsli_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x4_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 4); - transmute(vsliq_n_s16::(transmute(a), transmute(b))) +pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { + transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 4); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = transmute(vsliq_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_s64::(transmute(a), transmute(b))) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N >= 0 && N <= 63); - transmute(vsliq_n_s64::(transmute(a), transmute(b))) +pub unsafe fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x4_t = b; + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] +#[doc = "Rotate and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N >= 0 && N <= 63); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = transmute(vsliq_n_s64::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(rax1))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.rax1" + )] + fn _vrax1q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Shift left and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] -pub unsafe fn vslid_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rbit.v8i8" + )] + fn _vrbit_s8(a: int8x8_t) -> int8x8_t; + } + _vrbit_s8(a) } -#[doc = "Shift left and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] -pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 0 && N <= 63); - transmute(vsli_n_u64::(transmute(a), transmute(b))) -} -#[doc = "SM3PARTW1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw1" + link_name = "llvm.aarch64.neon.rbit.v16i8" )] - fn _vsm3partw1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vrbitq_s8(a: int8x16_t) -> int8x16_t; } - _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vrbitq_s8(a) } -#[doc = "SM3PARTW1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { + transmute(vrbit_s8(transmute(a))) +} +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw1" - )] - fn _vsm3partw1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "SM3PARTW2"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw2))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw2" - )] - fn _vsm3partw2q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { + transmute(vrbitq_s8(transmute(a))) } -#[doc = "SM3PARTW2"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw2))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw2" - )] - fn _vsm3partw2q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "SM3SS1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3ss1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3ss1" - )] - fn _vsm3ss1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { + transmute(vrbit_s8(transmute(a))) } -#[doc = "SM3SS1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3ss1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3ss1" - )] - fn _vsm3ss1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "SM3TT1A"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt1aq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1a" - )] - fn _vsm3tt1aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; - } - _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { + transmute(vrbitq_s8(transmute(a))) } -#[doc = "SM3TT1A"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] +#[doc = "Reverse bit order"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt1aq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1a" - )] - fn _vsm3tt1aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(rbit))] +pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "SM3TT1B"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt1bq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1b" + link_name = "llvm.aarch64.neon.frecpe.v1f64" )] - fn _vsm3tt1bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vrecpe_f64(a: float64x1_t) -> float64x1_t; } - _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() + _vrecpe_f64(a) } -#[doc = "SM3TT1B"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt1bq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1b" + link_name = "llvm.aarch64.neon.frecpe.v2f64" )] - fn _vsm3tt1bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vrecpeq_f64(a) } -#[doc = "SM3TT2A"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt2aq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecped_f64(a: f64) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt2a" + link_name = "llvm.aarch64.neon.frecpe.f64" )] - fn _vsm3tt2aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vrecped_f64(a: f64) -> f64; } - _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() + _vrecped_f64(a) } -#[doc = "SM3TT2A"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt2aq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecpe))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpes_f32(a: f32) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt2a" + link_name = "llvm.aarch64.neon.frecpe.f32" )] - fn _vsm3tt2aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vrecpes_f32(a: f32) -> f32; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vrecpes_f32(a) } -#[doc = "SM3TT2B"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt2bq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt2b" + link_name = "llvm.aarch64.neon.frecps.v1f64" )] - fn _vsm3tt2bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; } - _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() + _vrecps_f64(a, b) } -#[doc = "SM3TT2B"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm3tt2bq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(IMM2, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt2b" + link_name = "llvm.aarch64.neon.frecps.v2f64" )] - fn _vsm3tt2bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vrecpsq_f64(a, b) } -#[doc = "SM4 key"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm4ekey))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm4ekey" + link_name = "llvm.aarch64.neon.frecps.f64" )] - fn _vsm4ekeyq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrecpsd_f64(a: f64, b: f64) -> f64; } - _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vrecpsd_f64(a, b) } -#[doc = "SM4 key"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm4ekey))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecps))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm4ekey" + link_name = "llvm.aarch64.neon.frecps.f32" )] - fn _vsm4ekeyq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrecpss_f32(a: f32, b: f32) -> f32; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vrecpss_f32(a, b) } -#[doc = "SM4 encode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] +#[doc = "Floating-point reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm4e))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecpx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpxd_f64(a: f64) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm4e" + link_name = "llvm.aarch64.neon.frecpx.f64" )] - fn _vsm4eq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrecpxd_f64(a: f64) -> f64; } - _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vrecpxd_f64(a) } -#[doc = "SM4 encode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] +#[doc = "Floating-point reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm4e))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frecpx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrecpxs_f32(a: f32) -> f32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm4e" + link_name = "llvm.aarch64.neon.frecpx.f32" )] - fn _vsm4eq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrecpxs_f32(a: f32) -> f32; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vrecpxs_f32(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v8i8" - )] - fn _vsqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vsqadd_u8(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v8i8" - )] - fn _vsqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vsqadd_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v16i8" - )] - fn _vsqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vsqaddq_u8(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v16i8" - )] - fn _vsqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vsqaddq_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v4i16" - )] - fn _vsqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vsqadd_u16(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v4i16" - )] - fn _vsqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vsqadd_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v8i16" - )] - fn _vsqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vsqaddq_u16(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v8i16" - )] - fn _vsqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vsqaddq_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v2i32" - )] - fn _vsqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vsqadd_u32(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v2i32" - )] - fn _vsqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vsqadd_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v4i32" - )] - fn _vsqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vsqaddq_u32(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v4i32" - )] - fn _vsqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vsqaddq_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v1i64" - )] - fn _vsqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vsqadd_u64(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v2i64" - )] - fn _vsqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vsqaddq_u64(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { + transmute(a) } -#[doc = "Unsigned saturating Accumulate of Signed value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usqadd))] -pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.v2i64" - )] - fn _vsqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vsqaddq_u64(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { - simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { + transmute(a) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { - simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { + transmute(a) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.i64" - )] - fn _vsqaddd_u64(a: i64, b: i64) -> i64; - } - _vsqaddd_u64(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned saturating accumulate of signed value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usqadd.i32" - )] - fn _vsqadds_u32(a: i32, b: i32) -> i32; - } - _vsqadds_u32(a.as_signed(), b).as_unsigned() +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { + transmute(a) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { - simd_fsqrt(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_fsqrt(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { + transmute(a) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { - simd_fsqrt(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { + transmute(a) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_fsqrt(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { - simd_fsqrt(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { + transmute(a) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { - simd_fsqrt(a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { + transmute(a) } -#[doc = "Calculates the square root of each lane."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float64x2_t = simd_fsqrt(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v8i8" - )] - fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; - } - _vsri_n_s8(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v8i8" - )] - fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vsri_n_s8(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v16i8" - )] - fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; - } - _vsriq_n_s8(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v16i8" - )] - fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vsriq_n_s8(a, b, N); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v4i16" - )] - fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; - } - _vsri_n_s16(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v4i16" - )] - fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vsri_n_s16(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v8i16" - )] - fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; - } - _vsriq_n_s16(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v8i16" - )] - fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vsriq_n_s16(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v2i32" - )] - fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; - } - _vsri_n_s32(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v2i32" - )] - fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vsri_n_s32(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v4i32" - )] - fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; - } - _vsriq_n_s32(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v4i32" - )] - fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vsriq_n_s32(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v1i64" - )] - fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; - } - _vsri_n_s64(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v2i64" - )] - fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; - } - _vsriq_n_s64(a, b, N) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v2i64" - )] - fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vsriq_n_s64(a, b, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsri_n_s8::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = transmute(vsri_n_s8::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsriq_n_s8::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = transmute(vsriq_n_s8::(transmute(a), transmute(b))); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsri_n_s16::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = transmute(vsri_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsriq_n_s16::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = transmute(vsriq_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - transmute(vsri_n_s32::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = transmute(vsri_n_s32::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - transmute(vsriq_n_s32::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = transmute(vsriq_n_s32::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsriq_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = transmute(vsriq_n_s64::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsri_n_s8::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = transmute(vsri_n_s8::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(N >= 1 && N <= 8); - transmute(vsriq_n_s8::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = transmute(vsriq_n_s8::(transmute(a), transmute(b))); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsri_n_s16::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = transmute(vsri_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(N >= 1 && N <= 16); - transmute(vsriq_n_s16::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = transmute(vsriq_n_s16::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N >= 1 && N <= 64); - transmute(vsriq_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sri, N = 1))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = transmute(vsriq_n_s64::(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift right and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] -pub unsafe fn vsrid_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_s64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { + transmute(a) } -#[doc = "Shift right and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] -pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - transmute(vsri_n_u64::(transmute(a), transmute(b))) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v2f32" + )] + fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd32x_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v4f32" + )] + fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t; + } + _vrnd32xq_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32x.v2f64" + )] + fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t; + } + _vrnd32xq_f64(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] +#[doc = "Floating-point round to 32-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] +pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.frint32x.f64" + )] + fn _vrnd32x_f64(a: f64) -> f64; + } + transmute(_vrnd32x_f64(simd_extract!(a, 0))) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32z.v2f32" + )] + fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd32z_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32z.v4f32" + )] + fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t; + } + _vrnd32zq_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint32z.v2f64" + )] + fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t; + } + _vrnd32zq_f64(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] +#[doc = "Floating-point round to 32-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] +pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.frint32z.f64" + )] + fn _vrnd32z_f64(a: f64) -> f64; + } + transmute(_vrnd32z_f64(simd_extract!(a, 0))) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64x.v2f32" + )] + fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd64x_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64x.v4f32" + )] + fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t; + } + _vrnd64xq_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64x.v2f64" + )] + fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t; + } + _vrnd64xq_f64(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] +#[doc = "Floating-point round to 64-bit integer, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] +pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.frint64x.f64" + )] + fn _vrnd64x_f64(a: f64) -> f64; + } + transmute(_vrnd64x_f64(simd_extract!(a, 0))) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v2f32" + )] + fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd64z_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v4f32" + )] + fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t; + } + _vrnd64zq_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frint64z.v2f64" + )] + fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t; + } + _vrnd64zq_f64(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] +#[doc = "Floating-point round to 64-bit integer toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[target_feature(enable = "neon,frintts")] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] +pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.frint64z.f64" + )] + fn _vrnd64z_f64(a: f64) -> f64; + } + transmute(_vrnd64z_f64(simd_extract!(a, 0))) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.trunc.v2f32" + )] + fn _vrnd_f32(a: float32x2_t) -> float32x2_t; + } + _vrnd_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(str))] -#[allow(clippy::cast_ptr_alignment)] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - crate::ptr::write_unaligned(ptr.cast(), a) +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.trunc.v4f32" + )] + fn _vrndq_f32(a: float32x4_t) -> float32x4_t; + } + _vrndq_f32(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"] +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64" + link_name = "llvm.trunc.v1f64" )] - fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64); + fn _vrnd_f64(a: float64x1_t) -> float64x1_t; } - _vst1_f64_x2(b.0, b.1, a) + _vrnd_f64(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { +#[cfg_attr(test, assert_instr(frintz))] +pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" + link_name = "llvm.trunc.v2f64" )] - fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64); + fn _vrndq_f64(a: float64x2_t) -> float64x2_t; } - _vst1q_f64_x2(b.0, b.1, a) + _vrndq_f64(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" + link_name = "llvm.round.v2f32" )] - fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64); + fn _vrnda_f32(a: float32x2_t) -> float32x2_t; } - let mut b: float64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst1q_f64_x2(b.0, b.1, a) + _vrnda_f32(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64" + link_name = "llvm.round.v4f32" )] - fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64); + fn _vrndaq_f32(a: float32x4_t) -> float32x4_t; } - _vst1_f64_x3(b.0, b.1, b.2, a) + _vrndaq_f32(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" + link_name = "llvm.round.v1f64" )] - fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64); + fn _vrnda_f64(a: float64x1_t) -> float64x1_t; } - _vst1q_f64_x3(b.0, b.1, b.2, a) + _vrnda_f64(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { +#[cfg_attr(test, assert_instr(frinta))] +pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" + link_name = "llvm.round.v2f64" )] - fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64); + fn _vrndaq_f64(a: float64x2_t) -> float64x2_t; } - let mut b: float64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst1q_f64_x3(b.0, b.1, b.2, a) + _vrndaq_f64(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"] +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64" + link_name = "llvm.nearbyint.v2f32" )] - fn _vst1_f64_x4( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - d: float64x1_t, - ptr: *mut f64, - ); + fn _vrndi_f32(a: float32x2_t) -> float32x2_t; } - _vst1_f64_x4(b.0, b.1, b.2, b.3, a) + _vrndi_f32(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" + link_name = "llvm.nearbyint.v4f32" )] - fn _vst1q_f64_x4( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - ptr: *mut f64, - ); + fn _vrndiq_f32(a: float32x4_t) -> float32x4_t; } - _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) + _vrndiq_f32(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" + link_name = "llvm.nearbyint.v1f64" )] - fn _vst1q_f64_x4( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - ptr: *mut f64, - ); + fn _vrndi_f64(a: float64x1_t) -> float64x1_t; } - let mut b: float64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) + _vrndi_f64(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"] +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +#[cfg_attr(test, assert_instr(frinti))] +pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.nearbyint.v2f64" + )] + fn _vrndiq_f64(a: float64x2_t) -> float64x2_t; + } + _vrndiq_f64(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.floor.v2f32" + )] + fn _vrndm_f32(a: float32x2_t) -> float32x2_t; + } + _vrndm_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(nop, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { - static_assert_uimm_bits!(LANE, 1); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - *a = simd_extract!(b, LANE as u32); +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.floor.v4f32" + )] + fn _vrndmq_f32(a: float32x4_t) -> float32x4_t; + } + _vrndmq_f32(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"] +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v1f64.p0i8" + link_name = "llvm.floor.v1f64" )] - fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8); + fn _vrndm_f64(a: float64x1_t) -> float64x1_t; } - _vst2_f64(b.0, b.1, a as _) + _vrndm_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"] +#[doc = "Floating-point round to integral, toward minus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { - static_assert!(LANE == 0); +#[cfg_attr(test, assert_instr(frintm))] +pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8" + link_name = "llvm.floor.v2f64" )] - fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8); + fn _vrndmq_f64(a: float64x2_t) -> float64x2_t; } - _vst2_lane_f64(b.0, b.1, LANE as i64, a as _) + _vrndmq_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { - static_assert!(LANE == 0); +#[cfg_attr(test, assert_instr(frintn))] +pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8" + link_name = "llvm.aarch64.neon.frintn.v1f64" )] - fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8); + fn _vrndn_f64(a: float64x1_t) -> float64x1_t; } - _vst2_lane_s64(b.0, b.1, LANE as i64, a as _) + _vrndn_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { - static_assert!(LANE == 0); - vst2_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(frintn))] +pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frintn.v2f64" + )] + fn _vrndnq_f64(a: float64x2_t) -> float64x2_t; + } + _vrndnq_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { - static_assert!(LANE == 0); - vst2_lane_s64::(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(frintn))] +pub unsafe fn vrndns_f32(a: f32) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.roundeven.f32" + )] + fn _vrndns_f32(a: f32) -> f32; + } + _vrndns_f32(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" + link_name = "llvm.ceil.v2f32" )] - fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8); + fn _vrndp_f32(a: float32x2_t) -> float32x2_t; } - _vst2q_f64(b.0, b.1, a as _) + _vrndp_f32(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" + link_name = "llvm.ceil.v4f32" )] - fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8); + fn _vrndpq_f32(a: float32x4_t) -> float32x4_t; } - let mut b: float64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2q_f64(b.0, b.1, a as _) + _vrndpq_f32(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" + link_name = "llvm.ceil.v1f64" )] - fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8); + fn _vrndp_f64(a: float64x1_t) -> float64x1_t; } - _vst2q_s64(b.0, b.1, a as _) + _vrndp_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] +#[doc = "Floating-point round to integral, toward plus infinity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { +#[cfg_attr(test, assert_instr(frintp))] +pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" + link_name = "llvm.ceil.v2f64" )] - fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8); + fn _vrndpq_f64(a: float64x2_t) -> float64x2_t; } - let mut b: int64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2q_s64(b.0, b.1, a as _) + _vrndpq_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" + link_name = "llvm.rint.v2f32" )] - fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8); + fn _vrndx_f32(a: float32x2_t) -> float32x2_t; } - _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) + _vrndx_f32(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" + link_name = "llvm.rint.v4f32" )] - fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8); + fn _vrndxq_f32(a: float32x4_t) -> float32x4_t; } - let mut b: float64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) + _vrndxq_f32(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" + link_name = "llvm.rint.v1f64" )] - fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8); + fn _vrndx_f64(a: float64x1_t) -> float64x1_t; } - _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) + _vrndx_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] +#[doc = "Floating-point round to integral exact, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); +#[cfg_attr(test, assert_instr(frintx))] +pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" + link_name = "llvm.rint.v2f64" )] - fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8); + fn _vrndxq_f64(a: float64x2_t) -> float64x2_t; } - let mut b: int8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) + _vrndxq_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(srshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" + link_name = "llvm.aarch64.neon.srshl.i64" )] - fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8); + fn _vrshld_s64(a: i64, b: i64) -> i64; } - _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) + _vrshld_s64(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(urshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" + link_name = "llvm.aarch64.neon.urshl.i64" )] - fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8); + fn _vrshld_u64(a: i64, b: i64) -> i64; } - let mut b: int64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) + _vrshld_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(srshr, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - vst2q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + vrshld_s64(a, -N as i64) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(urshr, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: poly64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst2q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + vrshld_u64(a, -N as i64) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); - vst2q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vrshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); - let mut b: uint8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst2q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - vst2q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst2q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vrshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); - vst2q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { - static_assert_uimm_bits!(LANE, 4); - let mut b: poly8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst2q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { - vst2q_s64(transmute(a), transmute(b)) +pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v1f64" + )] + fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t; + } + _vrsqrte_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st2))] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { - let mut b: poly64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst2q_s64(transmute(a), transmute(b)) +pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v2f64" + )] + fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; + } + _vrsqrteq_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { - vst2q_s64(transmute(a), transmute(b)) +pub unsafe fn vrsqrted_f64(a: f64) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.f64" + )] + fn _vrsqrted_f64(a: f64) -> f64; + } + _vrsqrted_f64(a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { - let mut b: uint64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst2q_s64(transmute(a), transmute(b)) +pub unsafe fn vrsqrtes_f32(a: f32) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.f32" + )] + fn _vrsqrtes_f32(a: f32) -> f32; + } + _vrsqrtes_f32(a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { +pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v1f64.p0i8" + link_name = "llvm.aarch64.neon.frsqrts.v1f64" )] - fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8); + fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; } - _vst3_f64(b.0, b.1, b.2, a as _) + _vrsqrts_f64(a, b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { - static_assert!(LANE == 0); +pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8" + link_name = "llvm.aarch64.neon.frsqrts.v2f64" )] - fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8); + fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; } - _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) + _vrsqrtsq_f64(a, b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { - static_assert!(LANE == 0); +pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8" + link_name = "llvm.aarch64.neon.frsqrts.f64" )] - fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8); + fn _vrsqrtsd_f64(a: f64, b: f64) -> f64; } - _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) + _vrsqrtsd_f64(a, b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] +pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.f32" + )] + fn _vrsqrtss_f32(a: f32, b: f32) -> f32; + } + _vrsqrtss_f32(a, b) +} +#[doc = "Signed rounding shift right and accumulate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(2)] -pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { - static_assert!(LANE == 0); - vst3_lane_s64::(transmute(a), transmute(b)) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + let b: i64 = vrshrd_n_s64::(b); + a.wrapping_add(b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"] +#[doc = "Unsigned rounding shift right and accumulate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(urshr, N = 2))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + let b: u64 = vrshrd_n_u64::(b); + a.wrapping_add(b) +} +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let x: int8x8_t = vrsubhn_s16(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +} +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { - static_assert!(LANE == 0); - vst3_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let x: int16x4_t = vrsubhn_s32(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" - )] - fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8); - } - _vst3q_f64(b.0, b.1, b.2, a as _) +pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let x: int32x2_t = vrsubhn_s64(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" - )] - fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8); - } - let mut b: float64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3q_f64(b.0, b.1, b.2, a as _) +pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let x: uint8x8_t = vrsubhn_u16(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" - )] - fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8); - } - _vst3q_s64(b.0, b.1, b.2, a as _) +pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let x: uint16x4_t = vrsubhn_u32(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" - )] - fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8); - } - let mut b: int64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3q_s64(b.0, b.1, b.2, a as _) +pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let x: uint32x2_t = vrsubhn_u64(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3]) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" - )] - fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) +pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t { + static_assert!(LANE == 0); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { +pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" - )] - fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8); - } - let mut b: float64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] +#[doc = "SHA512 hash update part 2"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512h2))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" + link_name = "llvm.aarch64.crypto.sha512h2" )] - fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8); + fn _vsha512h2q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } - _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) + _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] +#[doc = "SHA512 hash update part 1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512h))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" + link_name = "llvm.aarch64.crypto.sha512h" )] - fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8); + fn _vsha512hq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } - let mut b: int8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) + _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] +#[doc = "SHA512 schedule update 0"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512su0))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" + link_name = "llvm.aarch64.crypto.sha512su0" )] - fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8); + fn _vsha512su0q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) + _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] +#[doc = "SHA512 schedule update 1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(sha512su1))] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" + link_name = "llvm.aarch64.crypto.sha512su1" )] - fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8); + fn _vsha512su1q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } - let mut b: int64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - vst3q_lane_s64::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: poly64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst3q_lane_s64::(transmute(a), transmute(b)) + _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); - vst3q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 { + transmute(vshl_s64(transmute(a), transmute(b))) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); - let mut b: uint8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst3q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { + transmute(vshl_u64(transmute(a), transmute(b))) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - vst3q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { + static_assert!(N >= 0 && N <= 8); + let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vshll_n_s8::(b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst3q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 16); + let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vshll_n_s16::(b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sshll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); - vst3q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 32); + let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); + vshll_n_s32::(b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { - static_assert_uimm_bits!(LANE, 4); - let mut b: poly8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst3q_lane_s8::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { - vst3q_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { - let mut b: poly64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst3q_s64(transmute(a), transmute(b)) +pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { + static_assert!(N >= 0 && N <= 8); + let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + vshll_n_u8::(b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { - vst3q_s64(transmute(a), transmute(b)) +pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 16); + let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + vshll_n_u16::(b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(ushll2, N = 2))] +#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { - let mut b: uint64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst3q_s64(transmute(a), transmute(b)) +pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 32); + let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + vshll_n_u32::(b) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v1f64.p0i8" - )] - fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8); - } - _vst4_f64(b.0, b.1, b.2, b.3, a as _) +pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vshrn_n_s16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8" - )] - fn _vst4_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - d: float64x1_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) +pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { - static_assert!(LANE == 0); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8" - )] - fn _vst4_lane_s64( - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - d: int64x1_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { - static_assert!(LANE == 0); - vst4_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] -pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { - static_assert!(LANE == 0); - vst4_lane_s64::(transmute(a), transmute(b)) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_shuffle!( + a, + vshrn_n_u16::(b), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" - )] - fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8); - } - _vst4q_f64(b.0, b.1, b.2, b.3, a as _) +pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(shrn2, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" - )] - fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8); - } - let mut b: float64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4q_f64(b.0, b.1, b.2, b.3, a as _) +pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { +pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" + link_name = "llvm.aarch64.neon.vsli.v8i8" )] - fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8); + fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; } - _vst4q_s64(b.0, b.1, b.2, b.3, a as _) + _vsli_n_s8(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { +pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" + link_name = "llvm.aarch64.neon.vsli.v16i8" )] - fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8); + fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; } - let mut b: int64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4q_s64(b.0, b.1, b.2, b.3, a as _) + _vsliq_n_s8(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" + link_name = "llvm.aarch64.neon.vsli.v4i16" )] - fn _vst4q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; } - _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vsli_n_s16(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" + link_name = "llvm.aarch64.neon.vsli.v8i16" )] - fn _vst4q_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - d: float64x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; } - let mut b: float64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vsliq_n_s16(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); +pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 0 && N <= 31); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" + link_name = "llvm.aarch64.neon.vsli.v2i32" )] - fn _vst4q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - n: i64, - ptr: *mut i8, - ); + fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; } - _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vsli_n_s32(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); +pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 31); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" + link_name = "llvm.aarch64.neon.vsli.v4i32" )] - fn _vst4q_lane_s8( - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - n: i64, - ptr: *mut i8, - ); + fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; } - let mut b: int8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vsliq_n_s32(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 0 && N <= 63); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" + link_name = "llvm.aarch64.neon.vsli.v1i64" )] - fn _vst4q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - d: int64x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; } - _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vsli_n_s64(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 63); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" + link_name = "llvm.aarch64.neon.vsli.v2i64" )] - fn _vst4q_lane_s64( - a: int64x2_t, - b: int64x2_t, - c: int64x2_t, - d: int64x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; } - let mut b: int64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - vst4q_lane_s64::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: poly64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst4q_lane_s64::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); - vst4q_lane_s8::(transmute(a), transmute(b)) + _vsliq_n_s64(a, b, N) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); - let mut b: uint8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst4q_lane_s8::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - vst4q_lane_s64::(transmute(a), transmute(b)) +pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vsli_n_s8::(transmute(a), transmute(b))) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst4q_lane_s64::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); - vst4q_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vsliq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] -pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { - static_assert_uimm_bits!(LANE, 4); - let mut b: poly8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst4q_lane_s8::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { - vst4q_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { - let mut b: poly64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst4q_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { - vst4q_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { - let mut b: uint64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst4q_s64(transmute(a), transmute(b)) -} -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fsub))] -pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - simd_sub(a, b) -} -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fsub))] -pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_sub(a, b) -} -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fsub))] -pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { - a.wrapping_sub(b) +pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vsli_n_s16::(transmute(a), transmute(b))) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 { - a.wrapping_sub(b) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vsliq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: int16x8_t = simd_cast(c); - let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: int16x8_t = simd_cast(e); - simd_sub(d, f) +pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 0 && N <= 31); + transmute(vsli_n_s32::(transmute(a), transmute(b))) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: int16x8_t = simd_cast(c); - let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: int16x8_t = simd_cast(e); - let ret_val: int16x8_t = simd_sub(d, f); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 31); + transmute(vsliq_n_s32::(transmute(a), transmute(b))) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: int32x4_t = simd_cast(c); - let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let f: int32x4_t = simd_cast(e); - simd_sub(d, f) +pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: int32x4_t = simd_cast(c); - let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let f: int32x4_t = simd_cast(e); - let ret_val: int32x4_t = simd_sub(d, f); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsliq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: int64x2_t = simd_cast(c); - let e: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let f: int64x2_t = simd_cast(e); - simd_sub(d, f) +pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vsli_n_s8::(transmute(a), transmute(b))) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] -pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: int64x2_t = simd_cast(c); - let e: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let f: int64x2_t = simd_cast(e); - let ret_val: int64x2_t = simd_sub(d, f); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vsliq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint16x8_t = simd_cast(c); - let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: uint16x8_t = simd_cast(e); - simd_sub(d, f) +pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vsli_n_s16::(transmute(a), transmute(b))) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); - let d: uint16x8_t = simd_cast(c); - let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let f: uint16x8_t = simd_cast(e); - let ret_val: uint16x8_t = simd_sub(d, f); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vsliq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: uint32x4_t = simd_cast(c); - let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let f: uint32x4_t = simd_cast(e); - simd_sub(d, f) +pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_s64::(transmute(a), transmute(b))) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); - let d: uint32x4_t = simd_cast(c); - let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let f: uint32x4_t = simd_cast(e); - let ret_val: uint32x4_t = simd_sub(d, f); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(N >= 0 && N <= 63); + transmute(vsliq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: uint64x2_t = simd_cast(c); - let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let f: uint64x2_t = simd_cast(e); - simd_sub(d, f) +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] +pub unsafe fn vslid_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_s64::(transmute(a), transmute(b))) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] -pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); - let d: uint64x2_t = simd_cast(c); - let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let f: uint64x2_t = simd_cast(e); - let ret_val: uint64x2_t = simd_sub(d, f); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] +pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 0 && N <= 63); + transmute(vsli_n_u64::(transmute(a), transmute(b))) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] +#[doc = "SM3PARTW1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - simd_sub(a, simd_cast(c)) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3partw1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3partw1" + )] + fn _vsm3partw1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] +#[doc = "SM3PARTW2"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = simd_sub(a, simd_cast(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3partw2))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3partw2" + )] + fn _vsm3partw2q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] +#[doc = "SM3SS1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - simd_sub(a, simd_cast(c)) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3ss1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3ss1" + )] + fn _vsm3ss1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] +#[doc = "SM3TT1A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let ret_val: int32x4_t = simd_sub(a, simd_cast(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt1aq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1a" + )] + fn _vsm3tt1aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] +#[doc = "SM3TT1B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); - simd_sub(a, simd_cast(c)) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt1bq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1b" + )] + fn _vsm3tt1bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] +#[doc = "SM3TT2A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] -pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); - let ret_val: int64x2_t = simd_sub(a, simd_cast(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt2aq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt2a" + )] + fn _vsm3tt2aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] +#[doc = "SM3TT2B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - simd_sub(a, simd_cast(c)) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm3tt2bq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt2b" + )] + fn _vsm3tt2bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + } + _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] +#[doc = "SM4 key"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = simd_sub(a, simd_cast(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4ekey))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4ekey" + )] + fn _vsm4ekeyq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] +#[doc = "SM4 encode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - simd_sub(a, simd_cast(c)) +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4e))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4e" + )] + fn _vsm4eq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); - let ret_val: uint32x4_t = simd_sub(a, simd_cast(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v8i8" + )] + fn _vsqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vsqadd_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - simd_sub(a, simd_cast(c)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v16i8" + )] + fn _vsqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vsqaddq_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] -pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); - let ret_val: uint64x2_t = simd_sub(a, simd_cast(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v4i16" + )] + fn _vsqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vsqadd_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(sudot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vsudot_laneq_s32( - a: int32x2_t, - b: int8x8_t, - c: uint8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, transmute(c), b) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v8i16" + )] + fn _vsqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vsqaddq_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(sudot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vsudot_laneq_s32( - a: int32x2_t, - b: int8x8_t, - c: uint8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint32x4_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vusdot_s32(a, transmute(c), b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v2i32" + )] + fn _vsqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vsqadd_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(sudot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vsudotq_laneq_s32( - a: int32x4_t, - b: int8x16_t, - c: uint8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: uint32x4_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, transmute(c), b) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v4i32" + )] + fn _vsqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vsqaddq_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(sudot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vsudotq_laneq_s32( - a: int32x4_t, - b: int8x16_t, - c: uint8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint32x4_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vusdotq_s32(a, transmute(c), b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v1i64" + )] + fn _vsqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vsqadd_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] +#[doc = "Unsigned saturating Accumulate of Signed value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vqtbl1_s8(vcombine_s8(a, crate::mem::zeroed()), transmute(b)) +#[cfg_attr(test, assert_instr(usqadd))] +pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.v2i64" + )] + fn _vsqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vsqaddq_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbl1_s8(vcombine_s8(a, crate::mem::zeroed()), transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 { + simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - vqtbl1_u8(vcombine_u8(a, crate::mem::zeroed()), b) +pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 { + simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = vqtbl1_u8(vcombine_u8(a, crate::mem::zeroed()), b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.i64" + )] + fn _vsqaddd_u64(a: i64, b: i64) -> i64; + } + _vsqaddd_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] +#[doc = "Unsigned saturating accumulate of signed value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - vqtbl1_p8(vcombine_p8(a, crate::mem::zeroed()), b) +pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usqadd.i32" + )] + fn _vsqadds_u32(a: i32, b: i32) -> i32; + } + _vsqadds_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = vqtbl1_p8(vcombine_p8(a, crate::mem::zeroed()), b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t { + simd_fsqrt(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) +pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { + simd_fsqrt(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - let mut a: int8x8x2_t = a; - a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); - a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t { + simd_fsqrt(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[doc = "Calculates the square root of each lane."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) +pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { + simd_fsqrt(a) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x2_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v8i8" + )] + fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; + } + _vsri_n_s8(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) +pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v16i8" + )] + fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; + } + _vsriq_n_s8(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x2_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v4i16" + )] + fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; + } + _vsri_n_s16(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - let x = int8x16x2_t( - vcombine_s8(a.0, a.1), - vcombine_s8(a.2, crate::mem::zeroed()), - ); - transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v8i16" + )] + fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; + } + _vsriq_n_s16(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - let mut a: int8x8x3_t = a; - a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); - a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); - a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = int8x16x2_t( - vcombine_s8(a.0, a.1), - vcombine_s8(a.2, crate::mem::zeroed()), - ); - let ret_val: int8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v2i32" + )] + fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; + } + _vsri_n_s32(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - let x = uint8x16x2_t( - vcombine_u8(a.0, a.1), - vcombine_u8(a.2, crate::mem::zeroed()), - ); - transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v4i32" + )] + fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; + } + _vsriq_n_s32(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x3_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let x = uint8x16x2_t( - vcombine_u8(a.0, a.1), - vcombine_u8(a.2, crate::mem::zeroed()), - ); - let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v1i64" + )] + fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; + } + _vsri_n_s64(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - let x = poly8x16x2_t( - vcombine_p8(a.0, a.1), - vcombine_p8(a.2, crate::mem::zeroed()), - ); - transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vsri.v2i64" + )] + fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + } + _vsriq_n_s64(a, b, N) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x3_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let x = poly8x16x2_t( - vcombine_p8(a.0, a.1), - vcombine_p8(a.2, crate::mem::zeroed()), - ); - let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsri_n_s8::(transmute(a), transmute(b))) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { - let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)); - transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) +pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsriq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { - let mut a: int8x8x4_t = a; - a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); - a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); - a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); - a.3 = simd_shuffle!(a.3, a.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)); - let ret_val: int8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsri_n_s16::(transmute(a), transmute(b))) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); - transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) +pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsriq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x4_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); - let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + transmute(vsri_n_s32::(transmute(a), transmute(b))) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); - transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) +pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + transmute(vsriq_n_s32::(transmute(a), transmute(b))) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x4_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); - let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_s64::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - simd_select( - simd_lt::(c, transmute(i8x8::splat(8))), - transmute(vqtbx1( - transmute(a), - transmute(vcombine_s8(b, crate::mem::zeroed())), - transmute(c), - )), - a, - ) +pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsriq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_select( - simd_lt::(c, transmute(i8x8::splat(8))), - transmute(vqtbx1( - transmute(a), - transmute(vcombine_s8(b, crate::mem::zeroed())), - transmute(c), - )), - a, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsri_n_s8::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - simd_select( - simd_lt::(c, transmute(u8x8::splat(8))), - transmute(vqtbx1( - transmute(a), - transmute(vcombine_u8(b, crate::mem::zeroed())), - c, - )), - a, - ) +pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert!(N >= 1 && N <= 8); + transmute(vsriq_n_s8::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_select( - simd_lt::(c, transmute(u8x8::splat(8))), - transmute(vqtbx1( - transmute(a), - transmute(vcombine_u8(b, crate::mem::zeroed())), - c, - )), - a, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsri_n_s16::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - simd_select( - simd_lt::(c, transmute(u8x8::splat(8))), - transmute(vqtbx1( - transmute(a), - transmute(vcombine_p8(b, crate::mem::zeroed())), - c, - )), - a, - ) +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert!(N >= 1 && N <= 16); + transmute(vsriq_n_s16::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_select( - simd_lt::(c, transmute(u8x8::splat(8))), - transmute(vqtbx1( - transmute(a), - transmute(vcombine_p8(b, crate::mem::zeroed())), - c, - )), - a, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_s64::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(sri, N = 1))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { - vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) +pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + static_assert!(N >= 1 && N <= 64); + transmute(vsriq_n_s64::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { - let mut b: int8x8x2_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] +pub unsafe fn vsrid_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_s64::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] +pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + transmute(vsri_n_u64::(transmute(a), transmute(b))) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x2_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { - transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) +pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x2_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { - let x = int8x16x2_t( - vcombine_s8(b.0, b.1), - vcombine_s8(b.2, crate::mem::zeroed()), - ); - transmute(simd_select( - simd_lt::(transmute(c), transmute(i8x8::splat(24))), - transmute(vqtbx2( - transmute(a), - transmute(x.0), - transmute(x.1), - transmute(c), - )), - a, - )) +pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { - let mut b: int8x8x3_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = int8x16x2_t( - vcombine_s8(b.0, b.1), - vcombine_s8(b.2, crate::mem::zeroed()), - ); - let ret_val: int8x8_t = transmute(simd_select( - simd_lt::(transmute(c), transmute(i8x8::splat(24))), - transmute(vqtbx2( - transmute(a), - transmute(x.0), - transmute(x.1), - transmute(c), - )), - a, - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - let x = uint8x16x2_t( - vcombine_u8(b.0, b.1), - vcombine_u8(b.2, crate::mem::zeroed()), - ); - transmute(simd_select( - simd_lt::(transmute(c), transmute(u8x8::splat(24))), - transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), - a, - )) +pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x3_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let x = uint8x16x2_t( - vcombine_u8(b.0, b.1), - vcombine_u8(b.2, crate::mem::zeroed()), - ); - let ret_val: uint8x8_t = transmute(simd_select( - simd_lt::(transmute(c), transmute(u8x8::splat(24))), - transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), - a, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - let x = poly8x16x2_t( - vcombine_p8(b.0, b.1), - vcombine_p8(b.2, crate::mem::zeroed()), - ); - transmute(simd_select( - simd_lt::(transmute(c), transmute(u8x8::splat(24))), - transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), - a, - )) +pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x3_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let x = poly8x16x2_t( - vcombine_p8(b.0, b.1), - vcombine_p8(b.2, crate::mem::zeroed()), - ); - let ret_val: poly8x8_t = transmute(simd_select( - simd_lt::(transmute(c), transmute(u8x8::splat(24))), - transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), - a, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - vqtbx2( - transmute(a), - transmute(vcombine_s8(b.0, b.1)), - transmute(vcombine_s8(b.2, b.3)), - transmute(c), - ) +pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - let mut b: int8x8x4_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqtbx2( - transmute(a), - transmute(vcombine_s8(b.0, b.1)), - transmute(vcombine_s8(b.2, b.3)), - transmute(c), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { - transmute(vqtbx2( - transmute(a), - transmute(vcombine_u8(b.0, b.1)), - transmute(vcombine_u8(b.2, b.3)), - c, - )) +pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x4_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vqtbx2( - transmute(a), - transmute(vcombine_u8(b.0, b.1)), - transmute(vcombine_u8(b.2, b.3)), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - transmute(vqtbx2( - transmute(a), - transmute(vcombine_p8(b.0, b.1)), - transmute(vcombine_p8(b.2, b.3)), - c, - )) +pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x4_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vqtbx2( - transmute(a), - transmute(vcombine_p8(b.0, b.1)), - transmute(vcombine_p8(b.2, b.3)), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(str))] +#[allow(clippy::cast_ptr_alignment)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { + crate::ptr::write_unaligned(ptr.cast(), a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64" + )] + fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64); + } + _vst1_f64_x2(b.0, b.1, a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64" + )] + fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64); + } + _vst1q_f64_x2(b.0, b.1, a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64" + )] + fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64); + } + _vst1_f64_x3(b.0, b.1, b.2, a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64" + )] + fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64); + } + _vst1q_f64_x3(b.0, b.1, b.2, a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64" + )] + fn _vst1_f64_x4( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + d: float64x1_t, + ptr: *mut f64, + ); + } + _vst1_f64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64" + )] + fn _vst1q_f64_x4( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + ptr: *mut f64, + ); + } + _vst1q_f64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(nop, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ) +pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v1f64.p0i8" + )] + fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8); + } + _vst2_f64(b.0, b.1, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8" + )] + fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8); + } + _vst2_lane_f64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8" + )] + fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8); + } + _vst2_lane_s64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { + static_assert!(LANE == 0); + vst2_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { + static_assert!(LANE == 0); + vst2_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2f64.p0i8" + )] + fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8); + } + _vst2q_f64(b.0, b.1, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2i64.p0i8" + )] + fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8); + } + _vst2q_s64(b.0, b.1, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8" + )] + fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8" + )] + fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ) +pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8" + )] + fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + vst2q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + vst2q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + vst2q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { + static_assert_uimm_bits!(LANE, 4); + vst2q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { + vst2q_s64(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { + vst2q_s64(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v1f64.p0i8" + )] + fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8); + } + _vst3_f64(b.0, b.1, b.2, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8" + )] + fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8); + } + _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8" + )] + fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8); + } + _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { + static_assert!(LANE == 0); + vst3_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { + static_assert!(LANE == 0); + vst3_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [0, 4, 2, 6]) +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2f64.p0i8" + )] + fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8); + } + _vst3q_f64(b.0, b.1, b.2, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2i64.p0i8" + )] + fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8); + } + _vst3q_s64(b.0, b.1, b.2, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8" + )] + fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] -pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8" + )] + fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8" + )] + fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + vst3q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + vst3q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + vst3q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { + static_assert_uimm_bits!(LANE, 4); + vst3q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { + vst3q_s64(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { + vst3q_s64(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v1f64.p0i8" + )] + fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8); + } + _vst4_f64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8" + )] + fn _vst4_lane_f64( + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + d: float64x1_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { + static_assert!(LANE == 0); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8" + )] + fn _vst4_lane_s64( + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + d: int64x1_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { + static_assert!(LANE == 0); + vst4_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { + static_assert!(LANE == 0); + vst4_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2f64.p0i8" + )] + fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8); + } + _vst4q_f64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2i64.p0i8" + )] + fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8); + } + _vst4q_s64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8" + )] + fn _vst4q_lane_f64( + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + d: float64x2_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8" + )] + fn _vst4q_lane_s8( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8" + )] + fn _vst4q_lane_s64( + a: int64x2_t, + b: int64x2_t, + c: int64x2_t, + d: int64x2_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + vst4q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + vst4q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + vst4q_lane_s64::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { + static_assert_uimm_bits!(LANE, 4); + vst4q_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { + vst4q_s64(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { + vst4q_s64(transmute(a), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(fsub))] +pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + simd_sub(a, b) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(test, assert_instr(fsub))] +pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_sub(a, b) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 { + a.wrapping_sub(b) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 { + a.wrapping_sub(b) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: int16x8_t = simd_cast(c); + let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: int16x8_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: int32x4_t = simd_cast(c); + let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let f: int32x4_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(test, assert_instr(ssubl))] +pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: int64x2_t = simd_cast(c); + let e: int32x2_t = simd_shuffle!(b, b, [2, 3]); + let f: int64x2_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + let d: uint16x8_t = simd_cast(c); + let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + let f: uint16x8_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + let d: uint32x4_t = simd_cast(c); + let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + let f: uint32x4_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(test, assert_instr(usubl))] +pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + let d: uint64x2_t = simd_cast(c); + let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + let f: uint64x2_t = simd_cast(e); + simd_sub(d, f) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + simd_sub(a, simd_cast(c)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + simd_sub(a, simd_cast(c)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(test, assert_instr(ssubw))] +pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); + simd_sub(a, simd_cast(c)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); + simd_sub(a, simd_cast(c)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); + simd_sub(a, simd_cast(c)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) +#[cfg_attr(test, assert_instr(usubw))] +pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); + simd_sub(a, simd_cast(c)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(sudot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vsudot_laneq_s32( + a: int32x2_t, + b: int8x8_t, + c: uint8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, transmute(c), b) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [1, 5, 3, 7]) +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(sudot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vsudotq_laneq_s32( + a: int32x4_t, + b: int8x16_t, + c: uint8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: uint32x4_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, transmute(c), b) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + vqtbl1_s8(vcombine_s8(a, crate::mem::zeroed()), transmute(b)) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + vqtbl1_u8(vcombine_u8(a, crate::mem::zeroed()), b) } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] -pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + vqtbl1_p8(vcombine_p8(a, crate::mem::zeroed()), b) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - let c: int64x1_t = simd_and(a, b); - let d: i64x1 = i64x1::new(0); - simd_ne(c, transmute(d)) +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let c: int64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { - let c: poly64x1_t = simd_and(a, b); - let d: i64x1 = i64x1::new(0); - simd_ne(c, transmute(d)) +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { - let c: poly64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: poly64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - let ret_val: uint64x2_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + let x = int8x16x2_t( + vcombine_s8(a.0, a.1), + vcombine_s8(a.2, crate::mem::zeroed()), + ); + transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - let c: uint64x1_t = simd_and(a, b); - let d: u64x1 = u64x1::new(0); - simd_ne(c, transmute(d)) +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t( + vcombine_u8(a.0, a.1), + vcombine_u8(a.2, crate::mem::zeroed()), + ); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let c: uint64x2_t = simd_and(a, b); - let d: u64x2 = u64x2::new(0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = uint8x16x2_t( + vcombine_u8(a.0, a.1), + vcombine_u8(a.2, crate::mem::zeroed()), + ); + let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_and(a, b); - let d: u64x2 = u64x2::new(0, 0); - let ret_val: uint64x2_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + let x = poly8x16x2_t( + vcombine_p8(a.0, a.1), + vcombine_p8(a.2, crate::mem::zeroed()), + ); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Compare bitwise test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 { - transmute(vtst_s64(transmute(a), transmute(b))) +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = poly8x16x2_t( + vcombine_p8(a.0, a.1), + vcombine_p8(a.2, crate::mem::zeroed()), + ); + let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Compare bitwise test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tst))] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 { - transmute(vtst_u64(transmute(a), transmute(b))) +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)); + transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v8i8" - )] - fn _vuqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vuqadd_s8(a, b.as_signed()) +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v8i8" - )] - fn _vuqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vuqadd_s8(a, b.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); + let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v16i8" - )] - fn _vuqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vuqaddq_s8(a, b.as_signed()) +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); + transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v16i8" - )] - fn _vuqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vuqaddq_s8(a, b.as_signed()); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); + let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v4i16" - )] - fn _vuqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vuqadd_s16(a, b.as_signed()) +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + simd_select( + simd_lt::(c, transmute(i8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_s8(b, crate::mem::zeroed())), + transmute(c), + )), + a, + ) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v4i16" - )] - fn _vuqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vuqadd_s16(a, b.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + simd_select( + simd_lt::(c, transmute(u8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_u8(b, crate::mem::zeroed())), + c, + )), + a, + ) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v8i16" - )] - fn _vuqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vuqaddq_s16(a, b.as_signed()) +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + simd_select( + simd_lt::(c, transmute(u8x8::splat(8))), + transmute(vqtbx1( + transmute(a), + transmute(vcombine_p8(b, crate::mem::zeroed())), + c, + )), + a, + ) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v8i16" - )] - fn _vuqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vuqaddq_s16(a, b.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v2i32" - )] - fn _vuqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vuqadd_s32(a, b.as_signed()) +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v2i32" - )] - fn _vuqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vuqadd_s32(a, b.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x2_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v4i32" - )] - fn _vuqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vuqaddq_s32(a, b.as_signed()) +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v4i32" - )] - fn _vuqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vuqaddq_s32(a, b.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x2_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v1i64" - )] - fn _vuqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vuqadd_s64(a, b.as_signed()) +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + let x = int8x16x2_t( + vcombine_s8(b.0, b.1), + vcombine_s8(b.2, crate::mem::zeroed()), + ); + transmute(simd_select( + simd_lt::(transmute(c), transmute(i8x8::splat(24))), + transmute(vqtbx2( + transmute(a), + transmute(x.0), + transmute(x.1), + transmute(c), + )), + a, + )) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v2i64" - )] - fn _vuqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vuqaddq_s64(a, b.as_signed()) +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t( + vcombine_u8(b.0, b.1), + vcombine_u8(b.2, crate::mem::zeroed()), + ); + transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )) } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v2i64" - )] - fn _vuqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vuqaddq_s64(a, b.as_signed()); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 { - simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x3_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = uint8x16x2_t( + vcombine_u8(b.0, b.1), + vcombine_u8(b.2, crate::mem::zeroed()), + ); + let ret_val: uint8x8_t = transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 { - simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + let x = poly8x16x2_t( + vcombine_p8(b.0, b.1), + vcombine_p8(b.2, crate::mem::zeroed()), + ); + transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )) } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.i64" - )] - fn _vuqaddd_s64(a: i64, b: i64) -> i64; - } - _vuqaddd_s64(a, b.as_signed()) +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x3_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let x = poly8x16x2_t( + vcombine_p8(b.0, b.1), + vcombine_p8(b.2, crate::mem::zeroed()), + ); + let ret_val: poly8x8_t = transmute(simd_select( + simd_lt::(transmute(c), transmute(u8x8::splat(24))), + transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), + a, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] +#[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.i32" - )] - fn _vuqadds_s32(a: i32, b: i32) -> i32; - } - _vuqadds_s32(a, b.as_signed()) +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + vqtbx2( + transmute(a), + transmute(vcombine_s8(b.0, b.1)), + transmute(vcombine_s8(b.2, b.3)), + transmute(c), + ) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(usdot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vusdot_laneq_s32( - a: int32x2_t, - b: uint8x8_t, - c: int8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, b, transmute(c)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + transmute(vqtbx2( + transmute(a), + transmute(vcombine_u8(b.0, b.1)), + transmute(vcombine_u8(b.2, b.3)), + c, + )) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(usdot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vusdot_laneq_s32( - a: int32x2_t, - b: uint8x8_t, - c: int8x16_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int32x4_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x4_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx2( + transmute(a), + transmute(vcombine_u8(b.0, b.1)), + transmute(vcombine_u8(b.2, b.3)), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(usdot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vusdotq_laneq_s32( - a: int32x4_t, - b: uint8x16_t, - c: int8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c: int32x4_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, b, transmute(c)) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + transmute(vqtbx2( + transmute(a), + transmute(vcombine_p8(b.0, b.1)), + transmute(vcombine_p8(b.2, b.3)), + c, + )) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(test, assert_instr(usdot, LANE = 3))] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub unsafe fn vusdotq_laneq_s32( - a: int32x4_t, - b: uint8x16_t, - c: int8x16_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int32x4_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vusdotq_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x4_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx2( + transmute(a), + transmute(vcombine_p8(b.0, b.1)), + transmute(vcombine_p8(b.2, b.3)), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { +pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { +pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [0, 2]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle!( a, b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [0, 4, 2, 6]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] +pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shuffle!( +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( a, b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_shuffle!( +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_shuffle!( a, b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] ) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [0, 2, 4, 6]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [1, 5, 3, 7]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] +pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) +pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + let c: int64x1_t = simd_and(a, b); + let d: i64x1 = i64x1::new(0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] -pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + let c: int64x2_t = simd_and(a, b); + let d: i64x2 = i64x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { + let c: poly64x1_t = simd_and(a, b); + let d: i64x1 = i64x1::new(0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { + let c: poly64x2_t = simd_and(a, b); + let d: i64x2 = i64x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + let c: uint64x1_t = simd_and(a, b); + let d: u64x1 = u64x1::new(0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let c: uint64x2_t = simd_and(a, b); + let d: u64x2 = u64x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] +#[doc = "Compare bitwise test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [1, 3]) +pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 { + transmute(vtst_s64(transmute(a), transmute(b))) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] +#[doc = "Compare bitwise test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 { + transmute(vtst_u64(transmute(a), transmute(b))) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v8i8" + )] + fn _vuqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vuqadd_s8(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v16i8" + )] + fn _vuqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vuqaddq_s8(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v4i16" + )] + fn _vuqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vuqadd_s16(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v8i16" + )] + fn _vuqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vuqaddq_s16(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v2i32" + )] + fn _vuqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vuqadd_s32(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v4i32" + )] + fn _vuqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vuqaddq_s32(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_shuffle!(a, b, [1, 3]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v1i64" + )] + fn _vuqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vuqadd_s64(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(test, assert_instr(suqadd))] +pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v2i64" + )] + fn _vuqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vuqaddq_s64(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 { + simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 { + simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.i64" + )] + fn _vuqaddd_s64(a: i64, b: i64) -> i64; + } + _vuqaddd_s64(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.i32" + )] + fn _vuqadds_s32(a: i32, b: i32) -> i32; + } + _vuqadds_s32(a, b.as_signed()) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ) +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(usdot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vusdot_laneq_s32( + a: int32x2_t, + b: uint8x8_t, + c: int8x16_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, b, transmute(c)) +} +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(test, assert_instr(usdot, LANE = 3))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub unsafe fn vusdotq_laneq_s32( + a: int32x4_t, + b: uint8x16_t, + c: int8x16_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c: int32x4_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, b, transmute(c)) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] +pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle!( a, b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] ) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_shuffle!( a, b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] ) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [0, 2, 4, 6]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_shuffle!(a, b, [1, 3, 5, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] +pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_shuffle!(a, b, [1, 3]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [1, 3]) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] -pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Exclusive OR and rotate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(IMM6, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.xar" - )] - fn _vxarq_u64(a: int64x2_t, b: int64x2_t, n: i64) -> int64x2_t; - } - _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned() +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Exclusive OR and rotate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(IMM6, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.xar" - )] - fn _vxarq_u64(a: int64x2_t, b: int64x2_t, n: i64) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] +pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_shuffle!(a, b, [1, 3]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shuffle!( +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( a, b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] ) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_shuffle!(a, b, [0, 2]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_shuffle!(a, b, [1, 3, 5, 7]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] +pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] +#[doc = "Exclusive OR and rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(IMM6, 6); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.xar" + )] + fn _vxarq_u64(a: int64x2_t, b: int64x2_t, n: i64) -> int64x2_t; + } + _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned() } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { +pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [0, 2]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_shuffle!( a, b, @@ -51274,196 +27256,157 @@ pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { ) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_shuffle!(a, b, [0, 4, 1, 5]) +pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_shuffle!(a, b, [0, 2]) +pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { +pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [0, 2]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 4, 1, 5]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) +pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 2]) } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -51475,34 +27418,10 @@ pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { ) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -51510,26 +27429,10 @@ pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [0, 4, 1, 5]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -51537,26 +27440,10 @@ pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -51564,26 +27451,10 @@ pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [0, 2]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] -pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51591,26 +27462,10 @@ pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_shuffle!(a, b, [1, 3]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51618,26 +27473,10 @@ pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51645,26 +27484,10 @@ pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { simd_shuffle!(a, b, [1, 3]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - let a: float64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51672,26 +27495,10 @@ pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51703,34 +27510,10 @@ pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { ) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51738,26 +27521,10 @@ pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51765,26 +27532,10 @@ pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51792,26 +27543,10 @@ pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_shuffle!(a, b, [1, 3]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51819,26 +27554,10 @@ pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51846,26 +27565,10 @@ pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_shuffle!(a, b, [1, 3]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51873,26 +27576,10 @@ pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51904,34 +27591,10 @@ pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { ) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51939,26 +27602,10 @@ pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51966,26 +27613,10 @@ pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -51993,26 +27624,10 @@ pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, b, [1, 3]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -52020,26 +27635,10 @@ pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -52047,26 +27646,10 @@ pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_shuffle!(a, b, [1, 3]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -52074,26 +27657,10 @@ pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -52105,34 +27672,10 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { ) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -52140,26 +27683,10 @@ pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, b, [2, 6, 3, 7]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -52167,44 +27694,13 @@ pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_shuffle!(a, b, [1, 3]) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] -pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index ca0a5b2715..868cb1937b 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -223,7 +223,6 @@ pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] @@ -239,34 +238,10 @@ unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { _priv_vpadal_s8(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i16.v8i8")] - fn _priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = _priv_vpadal_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] @@ -282,34 +257,10 @@ unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { _priv_vpadalq_s8(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v8i16.v16i8")] - fn _priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = _priv_vpadalq_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] @@ -325,34 +276,10 @@ unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { _priv_vpadal_s16(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i32.v4i16")] - fn _priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x2_t = _priv_vpadal_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] @@ -368,54 +295,10 @@ unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { _priv_vpadalq_s16(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v4i32.v8i16")] - fn _priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = _priv_vpadalq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v1i64.v2i32")] - fn _priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t; - } - _priv_vpadal_s32(a, b) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] @@ -428,7 +311,6 @@ unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v1i64.v2i32")] fn _priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); _priv_vpadal_s32(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] @@ -436,7 +318,6 @@ unsafe fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] @@ -452,34 +333,10 @@ unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { _priv_vpadalq_s32(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadals.v2i64.v4i32")] - fn _priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int64x2_t = _priv_vpadalq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] @@ -495,34 +352,10 @@ unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { _priv_vpadal_u8(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i16.v8i8")] - fn _priv_vpadal_u8(a: int16x4_t, b: int8x8_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x4_t = _priv_vpadal_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] @@ -538,34 +371,10 @@ unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { _priv_vpadalq_u8(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v8i16.v16i8")] - fn _priv_vpadalq_u8(a: int16x8_t, b: int8x16_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = _priv_vpadalq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] @@ -581,34 +390,10 @@ unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { _priv_vpadal_u16(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i32.v4i16")] - fn _priv_vpadal_u16(a: int32x2_t, b: int16x4_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x2_t = _priv_vpadal_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] @@ -624,54 +409,10 @@ unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { _priv_vpadalq_u16(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i32.v8i16")] - fn _priv_vpadalq_u16(a: int32x4_t, b: int16x8_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = _priv_vpadalq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v1i64.v2i32")] - fn _priv_vpadal_u32(a: int64x1_t, b: int32x2_t) -> int64x1_t; - } - _priv_vpadal_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] @@ -684,7 +425,6 @@ unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v1i64.v2i32")] fn _priv_vpadal_u32(a: int64x1_t, b: int32x2_t) -> int64x1_t; } - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); _priv_vpadal_u32(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Signed Add and Accumulate Long Pairwise."] @@ -692,7 +432,6 @@ unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] @@ -707,35 +446,11 @@ unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { } _priv_vpadalq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i64.v4i32")] - fn _priv_vpadalq_u32(a: int64x2_t, b: int32x4_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint64x2_t = _priv_vpadalq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] @@ -757,41 +472,10 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_add(a, simd_cast(e)) } #[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int8x8_t = vabd_s8(b, c); - let e: uint8x8_t = simd_cast(d); - let ret_val: int16x8_t = simd_add(a, simd_cast(e)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] @@ -813,41 +497,10 @@ pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_add(a, simd_cast(e)) } #[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let d: int16x4_t = vabd_s16(b, c); - let e: uint16x4_t = simd_cast(d); - let ret_val: int32x4_t = simd_add(a, simd_cast(e)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] @@ -868,42 +521,11 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let e: uint32x2_t = simd_cast(d); simd_add(a, simd_cast(e)) } -#[doc = "Signed Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let d: int32x2_t = vabd_s32(b, c); - let e: uint32x2_t = simd_cast(d); - let ret_val: int64x2_t = simd_add(a, simd_cast(e)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] @@ -924,40 +546,10 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t simd_add(a, simd_cast(d)) } #[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: uint8x8_t = vabd_u8(b, c); - let ret_val: uint16x8_t = simd_add(a, simd_cast(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] @@ -978,40 +570,10 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 simd_add(a, simd_cast(d)) } #[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let d: uint16x4_t = vabd_u16(b, c); - let ret_val: uint32x4_t = simd_add(a, simd_cast(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] @@ -1031,41 +593,11 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 let d: uint32x2_t = vabd_u32(b, c); simd_add(a, simd_cast(d)) } -#[doc = "Unsigned Absolute difference and Accumulate Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let d: uint32x2_t = vabd_u32(b, c); - let ret_val: uint64x2_t = simd_add(a, simd_cast(d)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] @@ -1093,11 +625,10 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { _vabd_f32(a, b) } #[doc = "Absolute difference between the arguments of Floating"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] @@ -1113,32 +644,28 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fabd.v2f32" + link_name = "llvm.aarch64.neon.fabd.v4f32" )] - fn _vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vabd_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vabdq_f32(a, b) } -#[doc = "Absolute difference between the arguments of Floating"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabd) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1148,29 +675,28 @@ pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { +pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fabd.v4f32" + link_name = "llvm.aarch64.neon.sabd.v8i8" )] - fn _vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")] + fn _vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vabdq_f32(a, b) + _vabd_s8(a, b) } -#[doc = "Absolute difference between the arguments of Floating"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] +#[doc = "Absolute difference between the arguments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabd) + assert_instr(sabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1180,29 +706,25 @@ pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { +pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fabd.v4f32" + link_name = "llvm.aarch64.neon.sabd.v16i8" )] - fn _vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")] + fn _vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vabdq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vabdq_s8(a, b) } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(sabd) @@ -1215,26 +737,25 @@ pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v8i8" + link_name = "llvm.aarch64.neon.sabd.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")] - fn _vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")] + fn _vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vabd_s8(a, b) + _vabd_s16(a, b) } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(sabd) @@ -1247,29 +768,25 @@ pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v8i8" + link_name = "llvm.aarch64.neon.sabd.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")] - fn _vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")] + fn _vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vabd_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vabdq_s16(a, b) } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(sabd) @@ -1282,26 +799,25 @@ pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v16i8" + link_name = "llvm.aarch64.neon.sabd.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")] - fn _vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")] + fn _vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - _vabdq_s8(a, b) + _vabd_s32(a, b) } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(sabd) @@ -1314,36 +830,28 @@ pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v16i8" + link_name = "llvm.aarch64.neon.sabd.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")] - fn _vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")] + fn _vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vabdq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + _vabdq_s32(a, b) } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1353,29 +861,28 @@ pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v4i16" + link_name = "llvm.aarch64.neon.uabd.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")] - fn _vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] + fn _vabd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vabd_s16(a, b) + _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1385,32 +892,28 @@ pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v4i16" + link_name = "llvm.aarch64.neon.uabd.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")] - fn _vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] + fn _vabdq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vabd_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1420,29 +923,28 @@ pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { +pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v8i16" + link_name = "llvm.aarch64.neon.uabd.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")] - fn _vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] + fn _vabd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vabdq_s16(a, b) + _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1452,32 +954,28 @@ pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { +pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v8i16" + link_name = "llvm.aarch64.neon.uabd.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")] - fn _vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")] + fn _vabdq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vabdq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1487,29 +985,28 @@ pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v2i32" + link_name = "llvm.aarch64.neon.uabd.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")] - fn _vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")] + fn _vabd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - _vabd_s32(a, b) + _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned() } #[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(uabd) )] #[cfg_attr( not(target_arch = "arm"), @@ -1519,32 +1016,28 @@ pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v2i32" + link_name = "llvm.aarch64.neon.uabd.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")] - fn _vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] + fn _vabdq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vabd_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1554,29 +1047,21 @@ pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")] - fn _vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vabdq_s32(a, b) +pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + let c: uint8x8_t = simd_cast(vabd_s8(a, b)); + simd_cast(c) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabd) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1586,32 +1071,21 @@ pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sabd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")] - fn _vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vabdq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + let c: uint16x4_t = simd_cast(vabd_s16(a, b)); + simd_cast(c) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] +#[doc = "Signed Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(sabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1621,29 +1095,21 @@ pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] - fn _vabd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + let c: uint32x2_t = simd_cast(vabd_s32(a, b)); + simd_cast(c) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1653,32 +1119,20 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] - fn _vabd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + simd_cast(vabd_u8(a, b)) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1688,29 +1142,20 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] - fn _vabdq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + simd_cast(vabd_u16(a, b)) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] +#[doc = "Unsigned Absolute difference Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(uabdl) )] #[cfg_attr( not(target_arch = "arm"), @@ -1720,36 +1165,20 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] - fn _vabdq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + simd_cast(vabd_u32(a, b)) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] +#[doc = "Floating-point absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(fabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1759,29 +1188,20 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] - fn _vabd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { + simd_fabs(a) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] +#[doc = "Floating-point absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(fabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1791,32 +1211,20 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] - fn _vabd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { + simd_fabs(a) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1826,29 +1234,28 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { +pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v8i16" + link_name = "llvm.aarch64.neon.abs.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")] - fn _vabdq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] + fn _vabs_s8(a: int8x8_t) -> int8x8_t; } - _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vabs_s8(a) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1858,32 +1265,28 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { +pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v8i16" + link_name = "llvm.aarch64.neon.abs.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")] - fn _vabdq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")] + fn _vabsq_s8(a: int8x16_t) -> int8x16_t; } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vabsq_s8(a) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1893,29 +1296,28 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v2i32" + link_name = "llvm.aarch64.neon.abs.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")] - fn _vabd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")] + fn _vabs_s16(a: int16x4_t) -> int16x4_t; } - _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vabs_s16(a) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1925,32 +1327,28 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v2i32" + link_name = "llvm.aarch64.neon.abs.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")] - fn _vabd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")] + fn _vabsq_s16(a: int16x8_t) -> int16x8_t; } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vabsq_s16(a) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1960,29 +1358,28 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v4i32" + link_name = "llvm.aarch64.neon.abs.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] - fn _vabdq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")] + fn _vabs_s32(a: int32x2_t) -> int32x2_t; } - _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vabs_s32(a) } -#[doc = "Absolute difference between the arguments"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] +#[doc = "Absolute value (wrapping)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabd) + assert_instr(abs) )] #[cfg_attr( not(target_arch = "arm"), @@ -1992,32 +1389,28 @@ pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uabd.v4i32" + link_name = "llvm.aarch64.neon.abs.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] - fn _vabdq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")] + fn _vabsq_s32(a: int32x4_t) -> int32x4_t; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vabsq_s32(a) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2027,22 +1420,20 @@ pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - let c: uint8x8_t = simd_cast(vabd_s8(a, b)); - simd_cast(c) +pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + simd_xor(a, b) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2052,25 +1443,20 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_cast(vabd_s8(a, b)); - let ret_val: int16x8_t = simd_cast(c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + simd_xor(a, b) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2080,22 +1466,20 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - let c: uint16x4_t = simd_cast(vabd_s16(a, b)); - simd_cast(c) +pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + simd_xor(a, b) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2105,25 +1489,20 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_cast(vabd_s16(a, b)); - let ret_val: int32x4_t = simd_cast(c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + simd_xor(a, b) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2133,22 +1512,20 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - let c: uint32x2_t = simd_cast(vabd_s32(a, b)); - simd_cast(c) +pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + simd_xor(a, b) } -#[doc = "Signed Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sabdl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2158,25 +1535,20 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_cast(vabd_s32(a, b)); - let ret_val: int64x2_t = simd_cast(c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + simd_xor(a, b) } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -2186,72 +1558,128 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - simd_cast(vabd_u8(a, b)) +pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { + a ^ b } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesd))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_cast(vabd_u8(a, b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesd" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] + fn _vaesdq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + } + _vaesdq_u8(data.as_signed(), key.as_signed()).as_unsigned() } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aese))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] +pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aese" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] + fn _vaeseq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + } + _vaeseq_u8(data.as_signed(), key.as_signed()).as_unsigned() +} +#[doc = "AES inverse mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesimc))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - simd_cast(vabd_u16(a, b)) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesimc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] + fn _vaesimcq_u8(data: int8x16_t) -> int8x16_t; + } + _vaesimcq_u8(data.as_signed()).as_unsigned() } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] +#[doc = "AES mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesmc))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesmc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] + fn _vaesmcq_u8(data: int8x16_t) -> int8x16_t; + } + _vaesmcq_u8(data.as_signed()).as_unsigned() +} +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2261,24 +1689,20 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_cast(vabd_u16(a, b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_and(a, b) } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2288,21 +1712,20 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - simd_cast(vabd_u32(a, b)) +pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_and(a, b) } -#[doc = "Unsigned Absolute difference Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uabdl) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2312,24 +1735,20 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_cast(vabd_u32(a, b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_and(a, b) } -#[doc = "Floating-point absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2339,21 +1758,20 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { - simd_fabs(a) +pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_and(a, b) } -#[doc = "Floating-point absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2363,23 +1781,20 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_fabs(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_and(a, b) } -#[doc = "Floating-point absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2389,21 +1804,20 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { - simd_fabs(a) +pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_and(a, b) } -#[doc = "Floating-point absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fabs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2413,23 +1827,20 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_fabs(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2439,29 +1850,20 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] - fn _vabs_s8(a: int8x8_t) -> int8x8_t; - } - _vabs_s8(a) +pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2471,31 +1873,20 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] - fn _vabs_s8(a: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vabs_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2505,29 +1896,20 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")] - fn _vabsq_s8(a: int8x16_t) -> int8x16_t; - } - _vabsq_s8(a) +pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2537,35 +1919,20 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")] - fn _vabsq_s8(a: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vabsq_s8(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2575,29 +1942,20 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")] - fn _vabs_s16(a: int16x4_t) -> int16x4_t; - } - _vabs_s16(a) +pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2607,31 +1965,20 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")] - fn _vabs_s16(a: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vabs_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2641,29 +1988,20 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")] - fn _vabsq_s16(a: int16x8_t) -> int16x8_t; - } - _vabsq_s16(a) +pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2673,31 +2011,20 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")] - fn _vabsq_s16(a: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vabsq_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] +#[doc = "Vector bitwise and"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(and) )] #[cfg_attr( not(target_arch = "arm"), @@ -2707,29 +2034,20 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")] - fn _vabs_s32(a: int32x2_t) -> int32x2_t; - } - _vabs_s32(a) +pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_and(a, b) } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -2739,31 +2057,28 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { +pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v2i32" + link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")] - fn _vabs_s32(a: int32x2_t) -> int32x2_t; + fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vabs_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vcage_f32(a, b).as_unsigned() } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] +#[doc = "Floating-point absolute compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -2773,29 +2088,28 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { +pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i32" + link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")] - fn _vabsq_s32(a: int32x4_t) -> int32x4_t; + fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; } - _vabsq_s32(a) + _vcageq_f32(a, b).as_unsigned() } -#[doc = "Absolute value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(abs) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -2805,31 +2119,28 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { +pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i32" + link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")] - fn _vabsq_s32(a: int32x4_t) -> int32x4_t; + fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vabsq_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vcagt_f32(a, b).as_unsigned() } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] +#[doc = "Floating-point absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -2839,21 +2150,28 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - simd_xor(a, b) +pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" + )] + fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + } + _vcagtq_f32(a, b).as_unsigned() } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -2863,24 +2181,20 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + vcage_f32(b, a) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] +#[doc = "Floating-point absolute compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facge) )] #[cfg_attr( not(target_arch = "arm"), @@ -2890,21 +2204,20 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - simd_xor(a, b) +pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + vcageq_f32(b, a) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -2914,28 +2227,20 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_xor(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + vcagt_f32(b, a) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] +#[doc = "Floating-point absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(facgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -2945,21 +2250,20 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - simd_xor(a, b) +pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + vcagtq_f32(b, a) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -2969,24 +2273,20 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_eq(a, b) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] +#[doc = "Floating-point compare equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fcmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -2996,21 +2296,20 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - simd_xor(a, b) +pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_eq(a, b) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3020,23 +2319,20 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_eq(a, b) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3046,21 +2342,20 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - simd_xor(a, b) +pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_eq(a, b) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3070,21 +2365,20 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - simd_xor(a, b) +pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_eq(a, b) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3094,23 +2388,20 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_eq(a, b) } -#[doc = "Bitwise exclusive OR"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3120,295 +2411,112 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { - a ^ b +pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_eq(a, b) } -#[doc = "AES single round encryption."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesd))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesd" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] - fn _vaesdq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; - } - _vaesdq_u8(data.as_signed(), key.as_signed()).as_unsigned() -} -#[doc = "AES single round encryption."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesd))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesd" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] - fn _vaesdq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; - } - let data: uint8x16_t = simd_shuffle!( - data, - data, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let key: uint8x16_t = simd_shuffle!( - key, - key, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let ret_val: uint8x16_t = _vaesdq_u8(data.as_signed(), key.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_eq(a, b) } -#[doc = "AES single round encryption."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aese))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aese" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] - fn _vaeseq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; - } - _vaeseq_u8(data.as_signed(), key.as_signed()).as_unsigned() -} -#[doc = "AES single round encryption."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aese))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aese" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] - fn _vaeseq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; - } - let data: uint8x16_t = simd_shuffle!( - data, - data, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let key: uint8x16_t = simd_shuffle!( - key, - key, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let ret_val: uint8x16_t = _vaeseq_u8(data.as_signed(), key.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_eq(a, b) } -#[doc = "AES inverse mix columns."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesimc))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesimc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] - fn _vaesimcq_u8(data: int8x16_t) -> int8x16_t; - } - _vaesimcq_u8(data.as_signed()).as_unsigned() -} -#[doc = "AES inverse mix columns."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesimc))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesimc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] - fn _vaesimcq_u8(data: int8x16_t) -> int8x16_t; - } - let data: uint8x16_t = simd_shuffle!( - data, - data, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let ret_val: uint8x16_t = _vaesimcq_u8(data.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_eq(a, b) } -#[doc = "AES mix columns."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesmc))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesmc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] - fn _vaesmcq_u8(data: int8x16_t) -> int8x16_t; - } - _vaesmcq_u8(data.as_signed()).as_unsigned() -} -#[doc = "AES mix columns."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(aesmc))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.aesmc" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] - fn _vaesmcq_u8(data: int8x16_t) -> int8x16_t; - } - let data: uint8x16_t = simd_shuffle!( - data, - data, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - let ret_val: uint8x16_t = _vaesmcq_u8(data.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_eq(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3418,21 +2526,20 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_and(a, b) +pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_eq(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3442,24 +2549,20 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_eq(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3469,21 +2572,20 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_and(a, b) +pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_eq(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3493,28 +2595,20 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_and(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + simd_eq(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] +#[doc = "Compare bitwise Equal (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmeq) )] #[cfg_attr( not(target_arch = "arm"), @@ -3524,21 +2618,20 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_and(a, b) +pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + simd_eq(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3548,24 +2641,20 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] +#[doc = "Floating-point compare greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3575,21 +2664,20 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_and(a, b) +pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3599,24 +2687,20 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3626,21 +2710,20 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_and(a, b) +pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3650,24 +2733,20 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3677,21 +2756,20 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_and(a, b) +pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3701,23 +2779,20 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] +#[doc = "Compare signed greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -3727,21 +2802,20 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_and(a, b) +pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -3751,21 +2825,20 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_and(a, b) +pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -3775,24 +2848,20 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -3802,21 +2871,20 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_and(a, b) +pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -3826,24 +2894,20 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -3853,21 +2917,20 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_and(a, b) +pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] +#[doc = "Compare unsigned greater than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -3877,28 +2940,20 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_and(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_ge(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3908,21 +2963,20 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_and(a, b) +pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] +#[doc = "Floating-point compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3932,24 +2986,20 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3959,21 +3009,20 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_and(a, b) +pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -3983,24 +3032,20 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4010,21 +3055,20 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_and(a, b) +pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4034,24 +3078,20 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4061,21 +3101,20 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_and(a, b) +pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] +#[doc = "Compare signed greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4085,23 +3124,20 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -4111,21 +3147,20 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_and(a, b) +pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -4135,21 +3170,20 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_and(a, b) +pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_gt(a, b) } -#[doc = "Vector bitwise and"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(and) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -4159,24 +3193,20 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_and(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_gt(a, b) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -4186,29 +3216,20 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" - )] - fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; - } - _vcage_f32(a, b).as_unsigned() +pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_gt(a, b) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -4218,32 +3239,20 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" - )] - fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vcage_f32(a, b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_gt(a, b) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] +#[doc = "Compare unsigned greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -4253,29 +3262,20 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" - )] - fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; - } - _vcageq_f32(a, b).as_unsigned() +pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_gt(a, b) } -#[doc = "Floating-point absolute compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4285,32 +3285,20 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" - )] - fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcageq_f32(a, b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] +#[doc = "Floating-point compare less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(fcmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4320,29 +3308,20 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" - )] - fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; - } - _vcagt_f32(a, b).as_unsigned() +pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4352,32 +3331,20 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" - )] - fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vcagt_f32(a, b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4387,29 +3354,20 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" - )] - fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; - } - _vcagtq_f32(a, b).as_unsigned() +pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4419,32 +3377,20 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" - )] - fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcagtq_f32(a, b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4454,21 +3400,20 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - vcage_f32(b, a) +pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4478,24 +3423,20 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = vcage_f32(b, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] +#[doc = "Compare signed less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(cmge) )] #[cfg_attr( not(target_arch = "arm"), @@ -4505,21 +3446,20 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - vcageq_f32(b, a) +pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facge) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4529,24 +3469,20 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vcageq_f32(b, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4556,21 +3492,20 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - vcagt_f32(b, a) +pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4580,24 +3515,20 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = vcagt_f32(b, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4607,21 +3538,20 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - vcagtq_f32(b, a) +pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_le(a, b) } -#[doc = "Floating-point absolute compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(facgt) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4631,24 +3561,20 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vcagtq_f32(b, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_le(a, b) } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] +#[doc = "Compare unsigned less than or equal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(cmhs) )] #[cfg_attr( not(target_arch = "arm"), @@ -4658,21 +3584,20 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_eq(a, b) +pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_le(a, b) } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4682,24 +3607,28 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v8i8" + )] + fn _vcls_s8(a: int8x8_t) -> int8x8_t; + } + _vcls_s8(a) } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4709,21 +3638,28 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_eq(a, b) +pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v16i8" + )] + fn _vclsq_s8(a: int8x16_t) -> int8x16_t; + } + _vclsq_s8(a) } -#[doc = "Floating-point compare equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4733,24 +3669,28 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v4i16" + )] + fn _vcls_s16(a: int16x4_t) -> int16x4_t; + } + _vcls_s16(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4760,21 +3700,28 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_eq(a, b) +pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v8i16" + )] + fn _vclsq_s16(a: int16x8_t) -> int16x8_t; + } + _vclsq_s16(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4784,24 +3731,28 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v2i32" + )] + fn _vcls_s32(a: int32x2_t) -> int32x2_t; + } + _vcls_s32(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4811,21 +3762,28 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_eq(a, b) +pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.cls.v4i32" + )] + fn _vclsq_s32(a: int32x4_t) -> int32x4_t; + } + _vclsq_s32(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4835,28 +3793,20 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_eq(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { + vcls_s8(transmute(a)) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4866,21 +3816,20 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_eq(a, b) +pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { + vclsq_s8(transmute(a)) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4890,24 +3839,20 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { + vcls_s16(transmute(a)) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4917,21 +3862,20 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_eq(a, b) +pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { + vclsq_s16(transmute(a)) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4941,24 +3885,20 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { + vcls_s32(transmute(a)) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cls) )] #[cfg_attr( not(target_arch = "arm"), @@ -4968,21 +3908,20 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_eq(a, b) +pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { + vclsq_s32(transmute(a)) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -4992,24 +3931,20 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] +#[doc = "Floating-point compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(fcmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5019,21 +3954,20 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_eq(a, b) +pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5043,24 +3977,20 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5070,21 +4000,20 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_eq(a, b) +pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5094,24 +4023,20 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5121,21 +4046,20 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_eq(a, b) +pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5145,28 +4069,20 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_eq(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] +#[doc = "Compare signed less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmgt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5176,21 +4092,20 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_eq(a, b) +pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -5200,24 +4115,20 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -5227,21 +4138,20 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_eq(a, b) +pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -5251,24 +4161,20 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -5278,21 +4184,20 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_eq(a, b) +pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -5302,24 +4207,20 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] +#[doc = "Compare unsigned less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(cmhi) )] #[cfg_attr( not(target_arch = "arm"), @@ -5329,21 +4230,20 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_eq(a, b) +pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_lt(a, b) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5353,24 +4253,28 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v8i8" + )] + fn _vclz_s8(a: int8x8_t) -> int8x8_t; + } + _vclz_s8(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5380,21 +4284,28 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { - simd_eq(a, b) +pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v16i8" + )] + fn _vclzq_s8(a: int8x16_t) -> int8x16_t; + } + _vclzq_s8(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5404,24 +4315,28 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_eq(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v4i16" + )] + fn _vclz_s16(a: int16x4_t) -> int16x4_t; + } + _vclz_s16(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5431,21 +4346,28 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { - simd_eq(a, b) +pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v8i16" + )] + fn _vclzq_s16(a: int16x8_t) -> int16x8_t; + } + _vclzq_s16(a) } -#[doc = "Compare bitwise Equal (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmeq) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5455,28 +4377,28 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_eq(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v2i32" + )] + fn _vclz_s32(a: int32x2_t) -> int32x2_t; + } + _vclz_s32(a) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5486,21 +4408,29 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_ge(a, b) +pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctlz.v4i32" + )] + fn _vclzq_s32(a: int32x4_t) -> int32x4_t; + } + _vclzq_s32(a) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5510,24 +4440,21 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { + transmute(vclz_s16(transmute(a))) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5537,21 +4464,23 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_ge(a, b) +pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vclz_s16(transmute(a))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Floating-point compare greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5561,24 +4490,21 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { + transmute(vclzq_s16(transmute(a))) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5588,21 +4514,23 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_ge(a, b) +pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(vclzq_s16(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5612,24 +4540,21 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { + transmute(vclz_s32(transmute(a))) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5639,52 +4564,23 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_ge(a, b) +pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x2_t = transmute(vclz_s32(transmute(a))); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_ge(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5694,21 +4590,21 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_ge(a, b) +pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { + transmute(vclzq_s32(transmute(a))) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5718,24 +4614,23 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(vclzq_s32(transmute(a))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5745,21 +4640,21 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_ge(a, b) +pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { + transmute(vclz_s8(transmute(a))) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5769,24 +4664,23 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vclz_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5796,21 +4690,21 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_ge(a, b) +pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { + transmute(vclzq_s8(transmute(a))) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(clz) )] #[cfg_attr( not(target_arch = "arm"), @@ -5820,24 +4714,26 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vclzq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5847,21 +4743,28 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_ge(a, b) +pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctpop.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] + fn _vcnt_s8(a: int8x8_t) -> int8x8_t; + } + _vcnt_s8(a) } -#[doc = "Compare signed greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5871,24 +4774,29 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.ctpop.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] + fn _vcntq_s8(a: int8x16_t) -> int8x16_t; + } + _vcntq_s8(a) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5898,21 +4806,21 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_ge(a, b) +pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { + transmute(vcnt_s8(transmute(a))) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5922,24 +4830,23 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vcnt_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5949,21 +4856,21 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_ge(a, b) +pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { + transmute(vcntq_s8(transmute(a))) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -5973,28 +4880,27 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_ge(a, b); +pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vcntq_s8(transmute(a))); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6004,21 +4910,21 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_ge(a, b) +pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { + transmute(vcnt_s8(transmute(a))) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6028,24 +4934,23 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vcnt_s8(transmute(a))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6055,21 +4960,21 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_ge(a, b) +pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { + transmute(vcntq_s8(transmute(a))) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] +#[doc = "Population count per byte."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(cnt) )] #[cfg_attr( not(target_arch = "arm"), @@ -6079,25 +4984,23 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vcntq_s8(transmute(a))); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6106,22 +5009,17 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_ge(a, b) +pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { + simd_shuffle!(a, b, [0, 1, 2, 3]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6130,25 +5028,17 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6157,22 +5047,17 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_ge(a, b) +pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare unsigned greater than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6181,25 +5066,17 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_ge(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { + simd_shuffle!(a, b, [0, 1, 2, 3]) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6208,22 +5085,17 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_gt(a, b) +pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { + simd_shuffle!(a, b, [0, 1]) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6232,25 +5104,17 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6259,22 +5123,17 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_gt(a, b) +pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Floating-point compare greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6283,25 +5142,17 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { + simd_shuffle!(a, b, [0, 1, 2, 3]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6310,22 +5161,17 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_gt(a, b) +pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { + simd_shuffle!(a, b, [0, 1]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6334,25 +5180,17 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6361,22 +5199,17 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_gt(a, b) +pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] +#[doc = "Vector combine"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -6385,28 +5218,21 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_gt(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { + simd_shuffle!(a, b, [0, 1]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6416,21 +5242,21 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_gt(a, b) +pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { + transmute(a) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6440,24 +5266,22 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6467,21 +5291,21 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_gt(a, b) +pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { + transmute(a) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6491,24 +5315,22 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6518,21 +5340,21 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_gt(a, b) +pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { + transmute(a) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6542,24 +5364,22 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6569,48 +5389,21 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_gt(a, b) +pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { + transmute(a) } -#[doc = "Compare signed greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6620,21 +5413,21 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_gt(a, b) +pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6644,24 +5437,21 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { + transmute(a) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6671,21 +5461,21 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_gt(a, b) +pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { + transmute(a) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6695,28 +5485,22 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_gt(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6726,21 +5510,21 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_gt(a, b) +pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { + transmute(a) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6750,24 +5534,22 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6777,48 +5559,21 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_gt(a, b) +pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { + transmute(a) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6828,21 +5583,21 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_gt(a, b) +pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6852,24 +5607,21 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { + transmute(a) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6879,21 +5631,21 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_gt(a, b) +pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { + transmute(a) } -#[doc = "Compare unsigned greater than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6903,24 +5655,22 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_gt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6930,21 +5680,21 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_le(a, b) +pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { + transmute(a) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6954,24 +5704,21 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -6981,21 +5728,20 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_le(a, b) +pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { + transmute(a) } -#[doc = "Floating-point compare less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmge) + assert_instr(scvtf) )] #[cfg_attr( not(target_arch = "arm"), @@ -7005,24 +5751,20 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { + simd_cast(a) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(scvtf) )] #[cfg_attr( not(target_arch = "arm"), @@ -7032,21 +5774,20 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_le(a, b) +pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { + simd_cast(a) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(ucvtf) )] #[cfg_attr( not(target_arch = "arm"), @@ -7056,24 +5797,20 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { + simd_cast(a) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(ucvtf) )] #[cfg_attr( not(target_arch = "arm"), @@ -7083,205 +5820,356 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_le(a, b) +pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { + simd_cast(a) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_le(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + _vcvt_n_f32_s32(a, N) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_le(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; + } + _vcvtq_n_f32_s32(a, N) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; + } + _vcvt_n_f32_s32(a, N) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_le(a, b) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(scvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; + } + _vcvtq_n_f32_s32(a, N) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; + } + _vcvt_n_f32_u32(a.as_signed(), N) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_le(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; + } + _vcvtq_n_f32_u32(a.as_signed(), N) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" + )] + fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; + } + _vcvt_n_f32_u32(a.as_signed(), N) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] +#[doc = "Fixed-point convert to floating-point"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ucvtf, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" + )] + fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; + } + _vcvtq_n_f32_u32(a.as_signed(), N) +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + _vcvt_n_s32_f32(a, N) +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + _vcvtq_n_s32_f32(a, N) +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" + )] + fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + _vcvt_n_s32_f32(a, N) +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" + )] + fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + _vcvtq_n_s32_f32(a, N) +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + _vcvt_n_u32_f32(a, N).as_unsigned() +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vcvt, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "arm", + link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + _vcvtq_n_u32_f32(a, N).as_unsigned() +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" + )] + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; + } + _vcvt_n_u32_f32(a, N).as_unsigned() +} +#[doc = "Floating-point convert to fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" + )] + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + } + _vcvtq_n_u32_f32(a, N).as_unsigned() +} +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -7291,21 +6179,28 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_le(a, b) +pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v2i32.v2f32" + )] + fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvt_s32_f32(a) } -#[doc = "Compare signed less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] +#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmge) + assert_instr(fcvtzs) )] #[cfg_attr( not(target_arch = "arm"), @@ -7315,24 +6210,28 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptosi.sat.v4i32.v4f32" + )] + fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtq_s32_f32(a) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(fcvtzu) )] #[cfg_attr( not(target_arch = "arm"), @@ -7342,21 +6241,28 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_le(a, b) +pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v2i32.v2f32" + )] + fn _vcvt_u32_f32(a: float32x2_t) -> int32x2_t; + } + _vcvt_u32_f32(a).as_unsigned() } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] +#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(fcvtzu) )] #[cfg_attr( not(target_arch = "arm"), @@ -7366,233 +6272,274 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.fptoui.sat.v4i32.v4f32" + )] + fn _vcvtq_u32_f32(a: float32x4_t) -> int32x4_t; + } + _vcvtq_u32_f32(a).as_unsigned() } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(sdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_le(a, b) +pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_s32(a, b, transmute(c)) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(sdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_le(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vdotq_lane_s32( + a: int32x4_t, + b: int8x16_t, + c: int8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_s32(a, b, transmute(c)) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(udot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_le(a, b) +pub unsafe fn vdot_lane_u32( + a: uint32x2_t, + b: uint8x8_t, + c: uint8x8_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vdot_u32(a, b, transmute(c)) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] +#[doc = "Dot product arithmetic (indexed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(udot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdotq_lane_u32( + a: uint32x4_t, + b: uint8x16_t, + c: uint8x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vdotq_u32(a, b, transmute(c)) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(sdot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_le(a, b) +pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" + )] + fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + } + _vdot_s32(a, b, c) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(sdot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" + )] + fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vdotq_s32(a, b, c) } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(udot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_le(a, b) +pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" + )] + fn _vdot_u32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + } + _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(udot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" + )] + fn _vdotq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7601,22 +6548,23 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_le(a, b) -} -#[doc = "Compare unsigned less than or equal"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] +pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhs) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7625,25 +6573,23 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_le(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7652,30 +6598,23 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i8" - )] - fn _vcls_s8(a: int8x8_t) -> int8x8_t; - } - _vcls_s8(a) +pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7684,32 +6623,23 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i8" - )] - fn _vcls_s8(a: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vcls_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7718,30 +6648,23 @@ pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v16i8" - )] - fn _vclsq_s8(a: int8x16_t) -> int8x16_t; - } - _vclsq_s8(a) +pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7750,36 +6673,23 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v16i8" - )] - fn _vclsq_s8(a: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vclsq_s8(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7788,30 +6698,23 @@ pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i16" - )] - fn _vcls_s16(a: int16x4_t) -> int16x4_t; - } - _vcls_s16(a) +pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7820,32 +6723,23 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i16" - )] - fn _vcls_s16(a: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vcls_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7854,30 +6748,23 @@ pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i16" - )] - fn _vclsq_s16(a: int16x8_t) -> int16x8_t; - } - _vclsq_s16(a) +pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7886,32 +6773,27 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v8i16" - )] - fn _vclsq_s16(a: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vclsq_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7920,30 +6802,27 @@ pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v2i32" - )] - fn _vcls_s32(a: int32x2_t) -> int32x2_t; - } - _vcls_s32(a) +pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7952,32 +6831,27 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v2i32" - )] - fn _vcls_s32(a: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcls_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -7986,30 +6860,27 @@ pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i32" - )] - fn _vclsq_s32(a: int32x4_t) -> int32x4_t; - } - _vclsq_s32(a) +pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8018,32 +6889,27 @@ pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.cls.v4i32" - )] - fn _vclsq_s32(a: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vclsq_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8052,22 +6918,27 @@ pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { - vcls_s8(transmute(a)) +pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8076,24 +6947,30 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vcls_s8(transmute(a)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8102,22 +6979,30 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { - vclsq_s8(transmute(a)) +pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8126,28 +7011,30 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vclsq_s8(transmute(a)); +pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] ) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(nop, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8156,22 +7043,23 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { - vcls_s16(transmute(a)) +pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N == 0); + a } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(nop, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8180,24 +7068,23 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = vcls_s16(transmute(a)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { + static_assert!(N == 0); + a } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8206,22 +7093,23 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { - vclsq_s16(transmute(a)) +pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8230,24 +7118,23 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vclsq_s16(transmute(a)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8256,22 +7143,23 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { - vcls_s32(transmute(a)) +pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8280,24 +7168,23 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = vcls_s32(transmute(a)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8306,22 +7193,23 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { - vclsq_s32(transmute(a)) +pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Count leading sign bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cls) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8330,24 +7218,23 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = vclsq_s32(transmute(a)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8356,22 +7243,23 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - simd_lt(a, b) +pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8380,25 +7268,23 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8407,22 +7293,23 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - simd_lt(a, b) +pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } -#[doc = "Floating-point compare less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcmgt) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8431,25 +7318,27 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8458,22 +7347,27 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - simd_lt(a, b) +pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8482,25 +7376,27 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8509,22 +7405,27 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - simd_lt(a, b) +pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8533,29 +7434,27 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_lt(a, b); +pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { + static_assert_uimm_bits!(N, 4); simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8564,22 +7463,27 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - simd_lt(a, b) +pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8588,25 +7492,30 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8615,22 +7524,30 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - simd_lt(a, b) +pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8639,25 +7556,30 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + simd_shuffle!( + a, + a, + [ + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, + N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 + ] + ) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8666,22 +7588,23 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - simd_lt(a, b) +pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8690,25 +7613,23 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 1); + transmute::(simd_extract!(a, N as u32)) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8717,22 +7638,23 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - simd_lt(a, b) +pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Compare signed less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmgt) + assert_instr(dup, N = 0) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8741,25 +7663,23 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { + static_assert!(N == 0); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8768,22 +7688,23 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_lt(a, b) +pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -8792,24 +7713,21 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + simd_shuffle!(a, a, [N as u32, N as u32]) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -8819,21 +7737,20 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_lt(a, b) +pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -8843,28 +7760,20 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_lt(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -8874,21 +7783,20 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_lt(a, b) +pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -8898,24 +7806,20 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -8925,21 +7829,20 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_lt(a, b) +pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -8949,24 +7852,20 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -8976,21 +7875,20 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_lt(a, b) +pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9000,24 +7898,20 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9027,21 +7921,20 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_lt(a, b) +pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_xor(a, b) } -#[doc = "Compare unsigned less than"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmhi) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9051,24 +7944,20 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_lt(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_xor(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9078,29 +7967,20 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i8" - )] - fn _vclz_s8(a: int8x8_t) -> int8x8_t; - } - _vclz_s8(a) +pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_xor(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9110,31 +7990,20 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i8" - )] - fn _vclz_s8(a: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vclz_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_xor(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9144,29 +8013,20 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v16i8" - )] - fn _vclzq_s8(a: int8x16_t) -> int8x16_t; - } - _vclzq_s8(a) +pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_xor(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9176,35 +8036,20 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v16i8" - )] - fn _vclzq_s8(a: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vclzq_s8(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_xor(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9214,29 +8059,20 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i16" - )] - fn _vclz_s16(a: int16x4_t) -> int16x4_t; - } - _vclz_s16(a) +pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_xor(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -9246,32 +8082,22 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i16" - )] - fn _vclz_s16(a: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vclz_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_xor(a, b) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9280,30 +8106,27 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i16" - )] - fn _vclzq_s16(a: int16x8_t) -> int16x8_t; +pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), } - _vclzq_s16(a) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9312,32 +8135,27 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v8i16" - )] - fn _vclzq_s16(a: int16x8_t) -> int16x8_t; +pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vclzq_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9346,30 +8164,27 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v2i32" - )] - fn _vclz_s32(a: int32x2_t) -> int32x2_t; +pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), } - _vclz_s32(a) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 7) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9378,32 +8193,33 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v2i32" - )] - fn _vclz_s32(a: int32x2_t) -> int32x2_t; +pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vclz_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 7) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9412,30 +8228,33 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i32" - )] - fn _vclzq_s32(a: int32x4_t) -> int32x4_t; +pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), } - _vclzq_s32(a) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 7) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9444,56 +8263,33 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctlz.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctlz.v4i32" - )] - fn _vclzq_s32(a: int32x4_t) -> int32x4_t; +pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vclzq_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { - transmute(vclz_s16(transmute(a))) } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 7) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9502,24 +8298,33 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(vclz_s16(transmute(a))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 7) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9528,22 +8333,33 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { - transmute(vclzq_s16(transmute(a))) +pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 7) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9552,24 +8368,33 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(vclzq_s16(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + match N & 0b111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), + 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), + 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), + 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), + 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9578,22 +8403,29 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { - transmute(vclz_s32(transmute(a))) +pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9602,24 +8434,29 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x2_t = transmute(vclz_s32(transmute(a))); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9628,22 +8465,29 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { - transmute(vclzq_s32(transmute(a))) +pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9652,24 +8496,29 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(vclzq_s32(transmute(a))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9678,22 +8527,29 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { - transmute(vclz_s8(transmute(a))) +pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9702,24 +8558,29 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vclz_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + match N & 0b11 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), + 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), + 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), + 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9728,22 +8589,27 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { - transmute(vclzq_s8(transmute(a))) +pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Count leading zero bits"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(clz) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9752,28 +8618,27 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vclzq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + match N & 0b1 { + 0 => simd_shuffle!(a, b, [0, 1]), + 1 => simd_shuffle!(a, b, [1, 2]), + _ => unreachable_unchecked(), + } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9782,30 +8647,101 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] - fn _vcnt_s8(a: int8x8_t) -> int8x8_t; - } - _vcnt_s8(a) +pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), + } } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9814,32 +8750,101 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v8i8")] - fn _vcnt_s8(a: int8x8_t) -> int8x8_t; +pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vcnt_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -9848,29 +8853,99 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] - fn _vcntq_s8(a: int8x16_t) -> int8x16_t; +pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + match N & 0b1111 { + 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), + 1 => simd_shuffle!( + a, + b, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + ), + 2 => simd_shuffle!( + a, + b, + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] + ), + 3 => simd_shuffle!( + a, + b, + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] + ), + 4 => simd_shuffle!( + a, + b, + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ), + 5 => simd_shuffle!( + a, + b, + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + ), + 6 => simd_shuffle!( + a, + b, + [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] + ), + 7 => simd_shuffle!( + a, + b, + [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] + ), + 8 => simd_shuffle!( + a, + b, + [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + ), + 9 => simd_shuffle!( + a, + b, + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ), + 10 => simd_shuffle!( + a, + b, + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ), + 11 => simd_shuffle!( + a, + b, + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + ), + 12 => simd_shuffle!( + a, + b, + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] + ), + 13 => simd_shuffle!( + a, + b, + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] + ), + 14 => simd_shuffle!( + a, + b, + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ), + 15 => simd_shuffle!( + a, + b, + [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] + ), + _ => unreachable_unchecked(), } - _vcntq_s8(a) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -9880,35 +8955,25 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { +pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.ctpop.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ctpop.v16i8")] - fn _vcntq_s8(a: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] + fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vcntq_s8(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + _vfma_f32(b, c, a) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -9918,21 +8983,25 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { - transmute(vcnt_s8(transmute(a))) +pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] + fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + _vfmaq_f32(b, c, a) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -9942,23 +9011,20 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vcnt_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfma_f32(a, b, vdup_n_f32_vfp4(c)) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -9968,21 +9034,20 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { - transmute(vcntq_s8(transmute(a))) +pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -9992,27 +9057,21 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(vcntq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + let b: float32x2_t = simd_neg(b); + vfma_f32(a, b, c) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -10022,21 +9081,21 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { - transmute(vcnt_s8(transmute(a))) +pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + let b: float32x4_t = simd_neg(b); + vfmaq_f32(a, b, c) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -10046,23 +9105,20 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vcnt_s8(transmute(a))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfms_f32(a, b, vdup_n_f32_vfp4(c)) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] +#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -10072,21 +9128,20 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { - transmute(vcntq_s8(transmute(a))) +pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } -#[doc = "Population count per byte."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cnt) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -10096,24 +9151,29 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(vcntq_s8(transmute(a))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] + fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhadd_s8(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10122,18 +9182,29 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { - simd_shuffle!(a, b, [0, 1, 2, 3]) +pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] + fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhaddq_s8(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10142,21 +9213,29 @@ pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] + fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhadd_s16(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10165,46 +9244,29 @@ pub unsafe fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] + fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhaddq_s16(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) )] -pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x16_t = - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10213,41 +9275,29 @@ pub unsafe fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] + fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhadd_s32(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shadd) )] -pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10256,41 +9306,29 @@ pub unsafe fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { - simd_shuffle!(a, b, [0, 1, 2, 3]) +pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] + fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhaddq_s32(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) )] -pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10299,18 +9337,29 @@ pub unsafe fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { - simd_shuffle!(a, b, [0, 1]) +pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] + fn _vhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10319,19 +9368,29 @@ pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { - let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 1]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] + fn _vhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10340,18 +9399,29 @@ pub unsafe fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] + fn _vhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10360,26 +9430,29 @@ pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] + fn _vhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10388,18 +9461,29 @@ pub unsafe fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] + fn _vhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhadd) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10408,21 +9492,29 @@ pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] + fn _vhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10431,18 +9523,29 @@ pub unsafe fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { - simd_shuffle!(a, b, [0, 1, 2, 3]) +pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] + fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhsub_s16(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10451,21 +9554,29 @@ pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] + fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhsubq_s16(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10474,18 +9585,29 @@ pub unsafe fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { - simd_shuffle!(a, b, [0, 1]) +pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] + fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhsub_s32(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10494,19 +9616,29 @@ pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { - let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 1]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] + fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhsubq_s32(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10515,18 +9647,29 @@ pub unsafe fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] + fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhsub_s8(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10535,26 +9678,29 @@ pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x16_t = - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] + fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhsubq_s8(a, b) } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10563,18 +9709,29 @@ pub unsafe fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { - simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] + fn _vhsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10583,21 +9740,29 @@ pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] + fn _vhsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10606,18 +9771,29 @@ pub unsafe fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { - simd_shuffle!(a, b, [0, 1]) +pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] + fn _vhsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector combine"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10626,22 +9802,28 @@ pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { - let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 1]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] + fn _vhsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -10651,21 +9833,28 @@ pub unsafe fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { - transmute(a) +pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] + fn _vhsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -10675,411 +9864,513 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] +pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] + fn _vhsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + transmute(vld1_v2f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { + let ret_val: float32x2_t = transmute(vld1_v2f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { + transmute(vld1q_v4f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { - let ret_val: int16x4_t = transmute(a); +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { + let ret_val: float32x4_t = transmute(vld1q_v4f32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { + transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { + transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { + let ret_val: uint8x16_t = transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { + transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { - let ret_val: uint16x4_t = transmute(a); +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { + transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { + let ret_val: uint16x8_t = transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { - transmute(a) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { + transmute(vld1_v2i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { - transmute(a) +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(vld1_v2i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { + transmute(vld1q_v4i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { + let ret_val: uint32x4_t = transmute(vld1q_v4i32( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { + transmute(vld1_v1i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + let ret_val: uint64x2_t = transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(vld1_v8i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + let ret_val: poly8x16_t = transmute(vld1q_v16i8( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(vld1_v4i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { + transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { + let ret_val: poly16x8_t = transmute(vld1q_v8i16( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { + transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { + let ret_val: poly64x2_t = transmute(vld1q_v2i64( + ptr as *const i8, + crate::mem::align_of::() as i32, + )); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11089,21 +10380,28 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { - transmute(a) +pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0")] + fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; + } + _vld1_f32_x2(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11113,21 +10411,28 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0")] + fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; + } + _vld1_f32_x3(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11137,21 +10442,28 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { - transmute(a) +pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0")] + fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; + } + _vld1_f32_x4(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11161,21 +10473,28 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { - simd_cast(a) +pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0")] + fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; + } + _vld1q_f32_x2(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11185,23 +10504,28 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0")] + fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; + } + _vld1q_f32_x3(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11211,21 +10535,46 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { - simd_cast(a) +pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0")] + fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; + } + _vld1q_f32_x4(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,aes")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { + let a: *const i8 = ptr as *const i8; + let b: i32 = crate::mem::align_of::() as i32; + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] + fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; + } + transmute(_vld1_v1i64(a, b)) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(scvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11235,23 +10584,20 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { + transmute(vld1_s64_x2(transmute(a))) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11261,21 +10607,20 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { - simd_cast(a) +pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { + transmute(vld1_s64_x3(transmute(a))) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11285,23 +10630,21 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { + transmute(vld1_s64_x4(transmute(a))) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11311,21 +10654,21 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { - simd_cast(a) +pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { + transmute(vld1q_s64_x2(transmute(a))) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ucvtf) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -11335,759 +10678,658 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_cast(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { + let mut ret_val: poly64x2x2_t = transmute(vld1q_s64_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; - } - _vcvt_n_f32_s32(a, N) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { + transmute(vld1q_s64_x3(transmute(a))) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vcvt_n_f32_s32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { + let mut ret_val: poly64x2x3_t = transmute(vld1q_s64_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; - } - _vcvtq_n_f32_s32(a, N) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { + transmute(vld1q_s64_x4(transmute(a))) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcvtq_n_f32_s32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { + let mut ret_val: poly64x2x4_t = transmute(vld1q_s64_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; - } - _vcvt_n_f32_s32(a, N) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { + vld1_v8i8(ptr as *const i8, crate::mem::align_of::() as i32) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_s32(a: int32x2_t, n: i32) -> float32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vcvt_n_f32_s32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { + vld1q_v16i8(ptr as *const i8, crate::mem::align_of::() as i32) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; - } - _vcvtq_n_f32_s32(a, N) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { + vld1_v4i16(ptr as *const i8, crate::mem::align_of::() as i32) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(scvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_s32(a: int32x4_t, n: i32) -> float32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcvtq_n_f32_s32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { + vld1q_v8i16(ptr as *const i8, crate::mem::align_of::() as i32) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; - } - _vcvt_n_f32_u32(a.as_signed(), N) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { + vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" - )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vcvt_n_f32_u32(a.as_signed(), N); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { + vld1q_v4i32(ptr as *const i8, crate::mem::align_of::() as i32) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" - )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; - } - _vcvtq_n_f32_u32(a.as_signed(), N) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { + vld1_v1i64(ptr as *const i8, crate::mem::align_of::() as i32) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { + vld1q_v2i64(ptr as *const i8, crate::mem::align_of::() as i32) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0" )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0")] + fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcvtq_n_f32_u32(a.as_signed(), N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld1_s8_x2(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" + link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0" )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0")] + fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; } - _vcvt_n_f32_u32(a.as_signed(), N) + _vld1_s8_x3(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" + link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0" )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0")] + fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vcvt_n_f32_u32(a.as_signed(), N); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld1_s8_x4(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" + link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0" )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0")] + fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; } - _vcvtq_n_f32_u32(a.as_signed(), N) + _vld1q_s8_x2(a) } -#[doc = "Fixed-point convert to floating-point"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ucvtf, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" + link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0" )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0")] + fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vcvtq_n_f32_u32(a.as_signed(), N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld1q_s8_x3(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { unsafe extern "unadjusted" { #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0" )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0")] + fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; } - _vcvt_n_s32_f32(a, N) + _vld1q_s8_x4(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0" )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0")] + fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcvt_n_s32_f32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld1_s16_x2(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" - )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - _vcvtq_n_s32_f32(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32" - )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vcvtq_n_s32_f32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" - )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - _vcvt_n_s32_f32(a, N) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32" + link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0" )] - fn _vcvt_n_s32_f32(a: float32x2_t, n: i32) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0")] + fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcvt_n_s32_f32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld1_s16_x3(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0" )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0")] + fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; } - _vcvtq_n_s32_f32(a, N) + _vld1_s16_x4(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzs, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32" - )] - fn _vcvtq_n_s32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vcvtq_n_s32_f32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" - )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - _vcvt_n_u32_f32(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" - )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vcvt_n_u32_f32(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" - )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; - } - _vcvtq_n_u32_f32(a, N).as_unsigned() -} -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vcvt, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - target_arch = "arm", - link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0" )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0")] + fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcvtq_n_u32_f32(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld1q_s16_x2(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" + link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0" )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0")] + fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; } - _vcvt_n_u32_f32(a, N).as_unsigned() + _vld1q_s16_x3(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" + link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0" )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0")] + fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vcvt_n_u32_f32(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld1q_s16_x4(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0" )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0")] + fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; } - _vcvtq_n_u32_f32(a, N).as_unsigned() + _vld1_s32_x2(a) } -#[doc = "Floating-point convert to fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(fcvtzu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0" )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0")] + fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcvtq_n_u32_f32(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld1_s32_x3(a) } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12097,29 +11339,28 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { +pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v2i32.v2f32" + link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0" )] - fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0")] + fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; } - _vcvt_s32_f32(a) + _vld1_s32_x4(a) } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12129,31 +11370,28 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { +pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v2i32.v2f32" + link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0" )] - fn _vcvt_s32_f32(a: float32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0")] + fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vcvt_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld1q_s32_x2(a) } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12163,29 +11401,28 @@ pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { +pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0" )] - fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0")] + fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; } - _vcvtq_s32_f32(a) + _vld1q_s32_x3(a) } -#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzs) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12195,31 +11432,28 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { +pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptosi.sat.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0" )] - fn _vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0")] + fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vcvtq_s32_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld1q_s32_x4(a) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12229,29 +11463,28 @@ pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { +pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v2i32.v2f32" + link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0" )] - fn _vcvt_u32_f32(a: float32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0")] + fn _vld1_s64_x2(a: *const i64) -> int64x1x2_t; } - _vcvt_u32_f32(a).as_unsigned() + _vld1_s64_x2(a) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12261,31 +11494,28 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { +pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v2i32.v2f32" + link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0" )] - fn _vcvt_u32_f32(a: float32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0")] + fn _vld1_s64_x3(a: *const i64) -> int64x1x3_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vcvt_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld1_s64_x3(a) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12295,29 +11525,28 @@ pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { +pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0" )] - fn _vcvtq_u32_f32(a: float32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0")] + fn _vld1_s64_x4(a: *const i64) -> int64x1x4_t; } - _vcvtq_u32_f32(a).as_unsigned() + _vld1_s64_x4(a) } -#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fcvtzu) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), @@ -12327,569 +11556,491 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { +pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.fptoui.sat.v4i32.v4f32" + link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0" )] - fn _vcvtq_u32_f32(a: float32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0")] + fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vcvtq_u32_f32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld1q_s64_x2(a) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_s32(a, b, transmute(c)) +pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0")] + fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; + } + _vld1q_s64_x3(a) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int32x2_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0")] + fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; + } + _vld1q_s64_x4(a) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_lane_s32( - a: int32x4_t, - b: int8x16_t, - c: int8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_s32(a, b, transmute(c)) +pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { + transmute(vld1_s8_x2(transmute(a))) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_lane_s32( - a: int32x4_t, - b: int8x16_t, - c: int8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int32x2_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vdotq_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { + let mut ret_val: uint8x8x2_t = transmute(vld1_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_lane_u32( - a: uint32x2_t, - b: uint8x8_t, - c: uint8x8_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vdot_u32(a, b, transmute(c)) +pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { + transmute(vld1_s8_x3(transmute(a))) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_lane_u32( - a: uint32x2_t, - b: uint8x8_t, - c: uint8x8_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint32x2_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { + let mut ret_val: uint8x8x3_t = transmute(vld1_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_lane_u32( - a: uint32x4_t, - b: uint8x16_t, - c: uint8x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vdotq_u32(a, b, transmute(c)) +pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { + transmute(vld1_s8_x4(transmute(a))) } -#[doc = "Dot product arithmetic (indexed)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot, LANE = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_lane_u32( - a: uint32x4_t, - b: uint8x16_t, - c: uint8x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint32x2_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: uint32x4_t = vdotq_u32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld1_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" - )] - fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - _vdot_s32(a, b, c) +pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { + transmute(vld1q_s8_x2(transmute(a))) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" - )] - fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x2_t = _vdot_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { + let mut ret_val: uint8x16x2_t = transmute(vld1q_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" - )] - fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vdotq_s32(a, b, c) +pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { + transmute(vld1q_s8_x3(transmute(a))) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sdot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" - )] - fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int32x4_t = _vdotq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { + let mut ret_val: uint8x16x3_t = transmute(vld1q_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" - )] - fn _vdot_u32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { + transmute(vld1q_s8_x4(transmute(a))) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" - )] - fn _vdot_u32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x2_t = _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld1q_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" - )] - fn _vdotq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() +pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { + transmute(vld1_s16_x2(transmute(a))) } -#[doc = "Dot product arithmetic (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,dotprod")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(udot) + assert_instr(ld1) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_dotprod", issue = "117224") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" - )] - fn _vdotq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint32x4_t = _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { + let mut ret_val: uint16x4x2_t = transmute(vld1_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12898,24 +12049,22 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { + transmute(vld1_s16_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12924,26 +12073,26 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { + let mut ret_val: uint16x4x3_t = transmute(vld1_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12952,24 +12101,22 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { + transmute(vld1_s16_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12978,26 +12125,27 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld1_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13006,24 +12154,22 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { + transmute(vld1q_s16_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13032,26 +12178,25 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { + let mut ret_val: uint16x8x2_t = transmute(vld1q_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13060,24 +12205,22 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { + transmute(vld1q_s16_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13086,26 +12229,26 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(N, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { + let mut ret_val: uint16x8x3_t = transmute(vld1q_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13114,24 +12257,22 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { + transmute(vld1q_s16_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13140,26 +12281,27 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(N, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld1q_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13168,24 +12310,22 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { + transmute(vld1_s32_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13194,26 +12334,25 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { + let mut ret_val: uint32x2x2_t = transmute(vld1_s32_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13222,24 +12361,22 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { + transmute(vld1_s32_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13248,26 +12385,26 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { + let mut ret_val: uint32x2x3_t = transmute(vld1_s32_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13276,24 +12413,22 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { + transmute(vld1_s32_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13302,26 +12437,27 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld1_s32_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13330,24 +12466,22 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { + transmute(vld1q_s32_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13356,26 +12490,25 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { + let mut ret_val: uint32x4x2_t = transmute(vld1q_s32_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13384,28 +12517,22 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { + transmute(vld1q_s32_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13414,30 +12541,26 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 2); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: poly16x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { + let mut ret_val: uint32x4x3_t = transmute(vld1q_s32_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13446,28 +12569,22 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { + transmute(vld1q_s32_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13476,30 +12593,26 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(N, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { + let mut ret_val: uint32x4x4_t = transmute(vld1q_s32_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13508,28 +12621,21 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { + transmute(vld1_s64_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13538,30 +12644,21 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { + transmute(vld1_s64_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13570,28 +12667,22 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { + transmute(vld1_s64_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13600,30 +12691,22 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { + transmute(vld1q_s64_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13632,28 +12715,25 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { + let mut ret_val: uint64x2x2_t = transmute(vld1q_s64_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13662,30 +12742,22 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { + transmute(vld1q_s64_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13694,28 +12766,26 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { + let mut ret_val: uint64x2x3_t = transmute(vld1q_s64_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13724,30 +12794,22 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { + transmute(vld1q_s64_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13756,70 +12818,27 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) -} -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x16_t = simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { + let mut ret_val: uint64x2x4_t = transmute(vld1q_s64_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13828,31 +12847,22 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { + transmute(vld1_s8_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13861,37 +12871,25 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x16_t = simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { + let mut ret_val: poly8x8x2_t = transmute(vld1_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13900,31 +12898,22 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { + transmute(vld1_s8_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13933,36 +12922,26 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { + let mut ret_val: poly8x8x3_t = transmute(vld1_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13971,23 +12950,22 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N == 0); - a +pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { + transmute(vld1_s8_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13996,24 +12974,27 @@ pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N == 0); - a +pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld1_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14022,24 +13003,22 @@ pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { + transmute(vld1q_s8_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14048,26 +13027,33 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(N, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { + let mut ret_val: poly8x16x2_t = transmute(vld1q_s8_x2(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14076,24 +13062,22 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { + transmute(vld1q_s8_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14102,26 +13086,38 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { + let mut ret_val: poly8x16x3_t = transmute(vld1q_s8_x3(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14130,24 +13126,22 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { + transmute(vld1q_s8_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14156,26 +13150,43 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld1q_s8_x4(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14184,24 +13195,22 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { + transmute(vld1_s16_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14210,26 +13219,25 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { + let mut ret_val: poly16x4x2_t = transmute(vld1_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14238,24 +13246,22 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { + transmute(vld1_s16_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14264,26 +13270,26 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { + let mut ret_val: poly16x4x3_t = transmute(vld1_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14292,24 +13298,22 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { + transmute(vld1_s16_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14318,26 +13322,27 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld1_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14346,24 +13351,22 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { + transmute(vld1q_s16_x2(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14372,26 +13375,25 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 3); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { + let mut ret_val: poly16x8x2_t = transmute(vld1q_s16_x2(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14400,24 +13402,22 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { + transmute(vld1q_s16_x3(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14426,26 +13426,26 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(N, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { + let mut ret_val: poly16x8x3_t = transmute(vld1q_s16_x3(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14454,24 +13454,22 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) +pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { + transmute(vld1q_s16_x4(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) + assert_instr(ld1) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14480,470 +13478,456 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld1q_s16_x4(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v1i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v1i64(a: *const i8, b: i32) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] + fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; + } + _vld1_v1i64(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] + fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; + } + _vld1_v2f32(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] + fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; + } + _vld1_v2i32(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v4i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] + fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; + } + _vld1_v4i16(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v8i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] + fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; + } + _vld1_v8i8(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v16i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 4) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] + fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; + } + _vld1q_v16i8(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v2i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] + fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; + } + _vld1q_v2i64(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 4); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] + fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; + } + _vld1q_v4f32(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] + fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; + } + _vld1q_v4i32(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v8i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { - static_assert_uimm_bits!(N, 4); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] + fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; + } + _vld1q_v8i16(a, b) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] + fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; + } + _vld2_dup_f32(a as *const i8, 4) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 4); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x8_t = simd_shuffle!( - a, - a, - [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32] - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] + fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; + } + _vld2q_dup_f32(a as *const i8, 4) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] + fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; + } + _vld2_dup_s8(a as *const i8, 1) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] + fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; + } + _vld2q_dup_s8(a as *const i8, 1) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] + fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; + } + _vld2_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] + fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; + } + _vld2q_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] + fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; + } + _vld2_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] + fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; + } + _vld2q_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" + )] + fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; + } + _vld2_dup_f32(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" + )] + fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; + } + _vld2q_dup_f32(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" + )] + fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; + } + _vld2_dup_s8(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" + )] + fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; + } + _vld2q_dup_s8(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" + )] + fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; + } + _vld2_dup_s16(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" + )] + fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; + } + _vld2q_dup_s16(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" + )] + fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; + } + _vld2_dup_s32(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" + )] + fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; + } + _vld2q_dup_s32(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14952,31 +13936,56 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { + transmute(vld2_dup_s64(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0")] + fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; + } + _vld2_dup_s64(a as *const i8, 8) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v1i64.p0" + )] + fn _vld2_dup_s64(ptr: *const i64) -> int64x1x2_t; + } + _vld2_dup_s64(a as _) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14985,37 +13994,22 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { + transmute(vld2_dup_s64(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15024,31 +14018,22 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); - simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ) +pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { + transmute(vld2_dup_s8(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 8) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15057,37 +14042,25 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shuffle!( - a, - a, - [ - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, - N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32 - ] - ); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { + let mut ret_val: uint8x8x2_t = transmute(vld2_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15096,24 +14069,22 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) +pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { + transmute(vld2q_dup_s8(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15122,25 +14093,33 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { - static_assert_uimm_bits!(N, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - transmute::(simd_extract!(a, N as u32)) +pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { + let mut ret_val: uint8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15149,24 +14128,22 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 1); - transmute::(simd_extract!(a, N as u32)) +pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { + transmute(vld2_dup_s16(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15175,25 +14152,25 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - transmute::(simd_extract!(a, N as u32)) +pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { + let mut ret_val: uint16x4x2_t = transmute(vld2_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15202,24 +14179,22 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { + transmute(vld2q_dup_s16(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15228,25 +14203,25 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { - static_assert!(N == 0); - let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { + let mut ret_val: uint16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15255,24 +14230,22 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { - static_assert!(N == 0); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { + transmute(vld2_dup_s32(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15281,25 +14254,25 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { - static_assert!(N == 0); - let ret_val: uint64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { + let mut ret_val: uint32x2x2_t = transmute(vld2_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15308,24 +14281,22 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { + transmute(vld2q_dup_s32(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15334,26 +14305,25 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { + let mut ret_val: uint32x4x2_t = transmute(vld2q_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15362,24 +14332,22 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - simd_shuffle!(a, a, [N as u32, N as u32]) +pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { + transmute(vld2_dup_s8(transmute(a))) } -#[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15388,24 +14356,24 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { + let mut ret_val: poly8x8x2_t = transmute(vld2_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -15415,21 +14383,21 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_xor(a, b) +pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { + transmute(vld2q_dup_s8(transmute(a))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -15439,24 +14407,32 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { + let mut ret_val: poly8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -15466,21 +14442,21 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_xor(a, b) +pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { + transmute(vld2_dup_s16(transmute(a))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -15490,28 +14466,24 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_xor(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { + let mut ret_val: poly16x4x2_t = transmute(vld2_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -15521,21 +14493,21 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_xor(a, b) +pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { + transmute(vld2q_dup_s16(transmute(a))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -15545,358 +14517,616 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { + let mut ret_val: poly16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_xor(a, b) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] + fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; + } + _vld2_f32(a as *const i8, 4) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] + fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; + } + _vld2q_f32(a as *const i8, 4) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_xor(a, b) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] + fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; + } + _vld2_s8(a as *const i8, 1) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] + fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; + } + _vld2q_s8(a as *const i8, 1) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_xor(a, b) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] + fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; + } + _vld2_s16(a as *const i8, 2) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] + fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; + } + _vld2q_s16(a as *const i8, 2) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_xor(a, b) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] + fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; + } + _vld2_s32(a as *const i8, 4) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_xor(a, b) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] + fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; + } + _vld2q_s32(a as *const i8, 4) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2f32.p0" + )] + fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; + } + _vld2_f32(a as _) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_xor(a, b) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v4f32.p0" + )] + fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; + } + _vld2q_f32(a as _) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v8i8.p0" + )] + fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; + } + _vld2_s8(a as _) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_xor(a, b) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v16i8.p0" + )] + fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; + } + _vld2q_s8(a as _) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_xor(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v4i16.p0" + )] + fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; + } + _vld2_s16(a as _) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v8i16.p0" + )] + fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; + } + _vld2q_s16(a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v2i32.p0" + )] + fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; + } + _vld2_s32(a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v4i32.p0" + )] + fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; + } + _vld2q_s32(a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0" + )] + fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; + } + _vld2_lane_f32(b.0, b.1, LANE as i64, a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" + )] + fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) + -> float32x4x2_t; + } + _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" + )] + fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; + } + _vld2_lane_s8(b.0, b.1, LANE as i64, a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0" + )] + fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; + } + _vld2_lane_s16(b.0, b.1, LANE as i64, a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0" + )] + fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; + } + _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" + )] + fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; + } + _vld2_lane_s32(b.0, b.1, LANE as i64, a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" + )] + fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; + } + _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] + fn _vld2_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x2_t; + } + _vld2_lane_f32(a as _, b.0, b.1, LANE, 4) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] + fn _vld2q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x2_t; + } + _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] + fn _vld2q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x2_t; + } + _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] + fn _vld2q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x2_t; + } + _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] + fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) + -> int8x8x2_t; + } + _vld2_lane_s8(a as _, b.0, b.1, LANE, 1) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] + fn _vld2_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x2_t; + } + _vld2_lane_s16(a as _, b.0, b.1, LANE, 2) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] + fn _vld2_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x2_t; + } + _vld2_lane_s32(a as _, b.0, b.1, LANE, 4) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15905,22 +15135,23 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_xor(a, b) +pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15929,25 +15160,23 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15956,22 +15185,23 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_xor(a, b) +pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15980,25 +15210,23 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld2_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16007,22 +15235,23 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_xor(a, b) +pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16031,25 +15260,23 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16058,22 +15285,23 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_xor(a, b) +pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(ld2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16082,23 +15310,21 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -16108,45 +15334,55 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_xor(a, b) +pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { + transmute(vld2_s64(transmute(a))) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64")] + fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; + } + _vld2_s64(a as *const i8, 8) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_xor(a, b) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v1i64.p0" + )] + fn _vld2_s64(ptr: *const int64x1_t) -> int64x1x2_t; + } + _vld2_s64(a as _) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -16156,26 +15392,22 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_xor(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { + transmute(vld2_s64(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16184,28 +15416,22 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { + transmute(vld2_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16214,31 +15440,25 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - static_assert_uimm_bits!(N, 1); - let ret_val: float32x2_t = match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { + let mut ret_val: uint8x8x2_t = transmute(vld2_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16247,28 +15467,22 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { + transmute(vld2q_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16277,31 +15491,33 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - static_assert_uimm_bits!(N, 1); - let ret_val: int32x2_t = match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { + let mut ret_val: uint8x16x2_t = transmute(vld2q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16310,28 +15526,22 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { + transmute(vld2_s16(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16340,31 +15550,25 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - static_assert_uimm_bits!(N, 1); - let ret_val: uint32x2_t = match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { + let mut ret_val: uint16x4x2_t = transmute(vld2_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16373,34 +15577,22 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { + transmute(vld2q_s16(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16409,37 +15601,25 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - static_assert_uimm_bits!(N, 3); - let ret_val: int8x8_t = match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { + let mut ret_val: uint16x8x2_t = transmute(vld2q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16448,34 +15628,22 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { + transmute(vld2_s32(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16484,37 +15652,25 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - static_assert_uimm_bits!(N, 3); - let ret_val: int16x8_t = match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { + let mut ret_val: uint32x2x2_t = transmute(vld2_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16523,34 +15679,22 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { + transmute(vld2q_s32(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16559,37 +15703,25 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - static_assert_uimm_bits!(N, 3); - let ret_val: uint8x8_t = match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { + let mut ret_val: uint32x4x2_t = transmute(vld2q_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16598,34 +15730,22 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { + transmute(vld2_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16634,37 +15754,25 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - static_assert_uimm_bits!(N, 3); - let ret_val: uint16x8_t = match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { + let mut ret_val: poly8x8x2_t = transmute(vld2_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16673,34 +15781,22 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { + transmute(vld2q_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16709,37 +15805,33 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - static_assert_uimm_bits!(N, 3); - let ret_val: poly8x8_t = match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { + let mut ret_val: poly8x16x2_t = transmute(vld2q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16748,34 +15840,22 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); - match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { + transmute(vld2_s16(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16784,37 +15864,25 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - static_assert_uimm_bits!(N, 3); - let ret_val: poly16x8_t = match N & 0b111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), - 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), - 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), - 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), - 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { + let mut ret_val: poly16x4x2_t = transmute(vld2_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16823,30 +15891,22 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { + transmute(vld2q_s16(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16855,33 +15915,323 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - static_assert_uimm_bits!(N, 2); - let ret_val: float32x4_t = match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { + let mut ret_val: poly16x8x2_t = transmute(vld2q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" + )] + fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; + } + _vld3_dup_f32(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" + )] + fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; + } + _vld3q_dup_f32(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" + )] + fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; + } + _vld3_dup_s8(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" + )] + fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; + } + _vld3q_dup_s8(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" + )] + fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; + } + _vld3_dup_s16(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" + )] + fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; + } + _vld3q_dup_s16(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" + )] + fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; + } + _vld3_dup_s32(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" + )] + fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; + } + _vld3q_dup_s32(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v1i64.p0" + )] + fn _vld3_dup_s64(ptr: *const i64) -> int64x1x3_t; + } + _vld3_dup_s64(a as _) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] + fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + _vld3_dup_f32(a as *const i8, 4) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] + fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + } + _vld3q_dup_f32(a as *const i8, 4) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] + fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + _vld3_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] + fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + _vld3q_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] + fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + _vld3_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] + fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + _vld3q_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] + fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + _vld3_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] + fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + } + _vld3q_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16890,30 +16240,37 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), +pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { + transmute(vld3_dup_s64(transmute(a))) +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0")] + fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } + _vld3_dup_s64(a as *const i8, 8) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16922,33 +16279,22 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - static_assert_uimm_bits!(N, 2); - let ret_val: int16x4_t = match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { + transmute(vld3_dup_s64(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16957,30 +16303,22 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { + transmute(vld3_dup_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16989,33 +16327,26 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - static_assert_uimm_bits!(N, 2); - let ret_val: int32x4_t = match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { + let mut ret_val: uint8x8x3_t = transmute(vld3_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17024,30 +16355,22 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_dup_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17056,33 +16379,38 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - static_assert_uimm_bits!(N, 2); - let ret_val: uint16x4_t = match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { + let mut ret_val: uint8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17091,30 +16419,22 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_dup_s16(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17123,33 +16443,26 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - static_assert_uimm_bits!(N, 2); - let ret_val: uint32x4_t = match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { + let mut ret_val: uint16x4x3_t = transmute(vld3_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17158,30 +16471,22 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_dup_s16(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17190,33 +16495,26 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - static_assert_uimm_bits!(N, 2); - let ret_val: poly16x4_t = match N & 0b11 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), - 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), - 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), - 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { + let mut ret_val: uint16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17225,28 +16523,22 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { + transmute(vld3_dup_s32(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17255,31 +16547,26 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - static_assert_uimm_bits!(N, 1); - let ret_val: int64x2_t = match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { + let mut ret_val: uint32x2x3_t = transmute(vld3_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17288,28 +16575,22 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_dup_s32(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17318,31 +16599,26 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - static_assert_uimm_bits!(N, 1); - let ret_val: uint64x2_t = match N & 0b1 { - 0 => simd_shuffle!(a, b, [0, 1]), - 1 => simd_shuffle!(a, b, [1, 2]), - _ => unreachable_unchecked(), - }; - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { + let mut ret_val: uint32x4x3_t = transmute(vld3q_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17351,102 +16627,22 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_dup_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17455,109 +16651,26 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - static_assert_uimm_bits!(N, 4); - let ret_val: int8x16_t = match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { + let mut ret_val: poly8x8x3_t = transmute(vld3_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17566,102 +16679,22 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_dup_s8(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17670,109 +16703,38 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - static_assert_uimm_bits!(N, 4); - let ret_val: uint8x16_t = match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] -#[doc = "## Safety"] +pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { + let mut ret_val: poly8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val +} +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] +#[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17781,102 +16743,22 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); - match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - } +pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_dup_s16(transmute(a))) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -17885,107 +16767,25 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - static_assert_uimm_bits!(N, 4); - let ret_val: poly8x16_t = match N & 0b1111 { - 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 1 => simd_shuffle!( - a, - b, - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - ), - 2 => simd_shuffle!( - a, - b, - [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - ), - 3 => simd_shuffle!( - a, - b, - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - ), - 4 => simd_shuffle!( - a, - b, - [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ), - 5 => simd_shuffle!( - a, - b, - [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - ), - 6 => simd_shuffle!( - a, - b, - [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - ), - 7 => simd_shuffle!( - a, - b, - [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] - ), - 8 => simd_shuffle!( - a, - b, - [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - ), - 9 => simd_shuffle!( - a, - b, - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - ), - 10 => simd_shuffle!( - a, - b, - [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ), - 11 => simd_shuffle!( - a, - b, - [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - ), - 12 => simd_shuffle!( - a, - b, - [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] - ), - 13 => simd_shuffle!( - a, - b, - [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] - ), - 14 => simd_shuffle!( - a, - b, - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] - ), - 15 => simd_shuffle!( - a, - b, - [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] - ), - _ => unreachable_unchecked(), - }; - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { + let mut ret_val: poly16x4x3_t = transmute(vld3_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17995,26 +16795,21 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] - fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - } - _vfma_f32(b, c, a) +pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_dup_s16(transmute(a))) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -18024,709 +16819,645 @@ pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { +pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { + let mut ret_val: poly16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] - fn _vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2f32.p0" + )] + fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = _vfma_f32(b, c, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vld3_f32(a as _) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] - fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4f32.p0" + )] + fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; } - _vfmaq_f32(b, c, a) + _vld3q_f32(a as _) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] - #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] - fn _vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v8i8.p0" + )] + fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vfmaq_f32(b, c, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld3_s8(a as _) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfma_f32(a, b, vdup_n_f32_vfp4(c)) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v16i8.p0" + )] + fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; + } + _vld3q_s8(a as _) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = vfma_f32(a, b, vdup_n_f32_vfp4(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4i16.p0" + )] + fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; + } + _vld3_s16(a as _) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v8i16.p0" + )] + fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; + } + _vld3q_s16(a as _) } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v2i32.p0" + )] + fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; + } + _vld3_s32(a as _) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - let b: float32x2_t = simd_neg(b); - vfma_f32(a, b, c) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3.v4i32.p0" + )] + fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; + } + _vld3q_s32(a as _) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let b: float32x2_t = simd_neg(b); - let ret_val: float32x2_t = vfma_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] + fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + _vld3_f32(a as *const i8, 4) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - let b: float32x4_t = simd_neg(b); - vfmaq_f32(a, b, c) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] + fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + } + _vld3q_f32(a as *const i8, 4) } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let b: float32x4_t = simd_neg(b); - let ret_val: float32x4_t = vfmaq_f32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] + fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + _vld3_s8(a as *const i8, 1) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfms_f32(a, b, vdup_n_f32_vfp4(c)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] + fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + _vld3q_s8(a as *const i8, 1) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = vfms_f32(a, b, vdup_n_f32_vfp4(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] + fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + _vld3_s16(a as *const i8, 2) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] + fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + _vld3q_s16(a as *const i8, 2) } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] + fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + _vld3_s32(a as *const i8, 4) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] - fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] + fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; } - _vhadd_s8(a, b) + _vld3q_s32(a as *const i8, 4) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i8" + link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] - fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vld3_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x3_t; } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vhadd_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v16i8" + link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] - fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vld3q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x3_t; } - _vhaddq_s8(a, b) + _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] - fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] + fn _vld3_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x3_t; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vhaddq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i16" + link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] - fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vld3_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x3_t; } - _vhadd_s16(a, b) + _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i16" + link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] - fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vld3_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x3_t; } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vhadd_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 4); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i16" + link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] - fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vld3q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x3_t; } - _vhaddq_s16(a, b) + _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i16" + link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] - fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vld3_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x3_t; } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vhaddq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v2i32" + link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] - fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vld3q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x3_t; } - _vhadd_s32(a, b) + _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] + fn _vld3_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x3_t; + } + _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] + fn _vld3_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x3_t; + } + _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] + fn _vld3q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x3_t; + } + _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] + fn _vld3_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x3_t; + } + _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] + fn _vld3q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x3_t; + } + _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18735,33 +17466,23 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] - fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vhadd_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18770,30 +17491,23 @@ pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] - fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vhaddq_s32(a, b) +pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18802,33 +17516,23 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] - fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vhaddq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18837,30 +17541,23 @@ pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] - fn _vhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18869,33 +17566,23 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] - fn _vhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18904,30 +17591,23 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] - fn _vhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18936,37 +17616,23 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] - fn _vhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -18975,29 +17641,21 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] - fn _vhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -19007,32 +17665,55 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { + transmute(vld3_s64(transmute(a))) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i16" + link_name = "llvm.aarch64.neon.ld3.v1i64.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] - fn _vhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld3_s64(a as _) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] + fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; + } + _vld3_s64(a as *const i8, 8) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -19042,29 +17723,21 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] - fn _vhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { + transmute(vld3_s64(transmute(a))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19074,32 +17747,21 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] - fn _vhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { + transmute(vld3_s8(transmute(a))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19109,29 +17771,25 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] - fn _vhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { + let mut ret_val: uint8x8x3_t = transmute(vld3_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19141,32 +17799,21 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] - fn _vhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_s8(transmute(a))) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19176,64 +17823,37 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] - fn _vhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] - fn _vhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { + let mut ret_val: uint8x16x3_t = transmute(vld3q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19243,29 +17863,21 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] - fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vhsub_s16(a, b) +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_s16(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19275,32 +17887,25 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] - fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vhsub_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + let mut ret_val: uint16x4x3_t = transmute(vld3_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19310,29 +17915,21 @@ pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] - fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vhsubq_s16(a, b) +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_s16(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19342,32 +17939,25 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] - fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vhsubq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + let mut ret_val: uint16x8x3_t = transmute(vld3q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19377,29 +17967,21 @@ pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] - fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vhsub_s32(a, b) +pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { + transmute(vld3_s32(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19409,32 +17991,25 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] - fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vhsub_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { + let mut ret_val: uint32x2x3_t = transmute(vld3_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19444,29 +18019,21 @@ pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] - fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vhsubq_s32(a, b) +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_s32(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19476,32 +18043,25 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] - fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vhsubq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + let mut ret_val: uint32x4x3_t = transmute(vld3q_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19511,29 +18071,21 @@ pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] - fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vhsub_s8(a, b) +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_s8(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19543,32 +18095,25 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] - fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vhsub_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + let mut ret_val: poly8x8x3_t = transmute(vld3_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19578,29 +18123,21 @@ pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] - fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vhsubq_s8(a, b) +pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_s8(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19610,36 +18147,37 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] - fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vhsubq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { + let mut ret_val: poly8x16x3_t = transmute(vld3q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19649,29 +18187,21 @@ pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] - fn _vhsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_s16(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19681,32 +18211,25 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] - fn _vhsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { + let mut ret_val: poly16x4x3_t = transmute(vld3_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19716,29 +18239,21 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] - fn _vhsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_s16(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -19748,135 +18263,387 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] - fn _vhsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + let mut ret_val: poly16x8x3_t = transmute(vld3q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] - fn _vhsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] + fn _vld3q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x3_t; } - _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] - fn _vhsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] + fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vld4_dup_f32(a as *const i8, 4) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] + fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + _vld4q_dup_f32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] + fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + _vld4_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] + fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + _vld4q_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] + fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + _vld4_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] + fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + _vld4q_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] + fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] + fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" + )] + fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; + } + _vld4_dup_f32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" + )] + fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; + } + _vld4q_dup_f32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" + )] + fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; + } + _vld4_dup_s8(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" + )] + fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; + } + _vld4q_dup_s8(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" + )] + fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; + } + _vld4_dup_s16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" + )] + fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; + } + _vld4q_dup_s16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" + )] + fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" + )] + fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64.p0" + )] + fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; + } + _vld4_dup_s64(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { +pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] - fn _vhsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] + fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } - _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vld4_dup_s64(a as *const i8, 8) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19886,32 +18653,21 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] - fn _vhsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19921,29 +18677,21 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] - fn _vhsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19953,32 +18701,26 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] - fn _vhsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld4_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -19988,29 +18730,21 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] - fn _vhsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_dup_s8(transmute(a))) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -20020,821 +18754,1141 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] - fn _vhsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - transmute(vld1_v2f32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_dup_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - let ret_val: float32x2_t = transmute(vld1_v2f32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld4_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { - transmute(vld1q_v4f32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_dup_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { - let ret_val: float32x4_t = transmute(vld1q_v4f32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { - transmute(vld1_v8i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_dup_s32(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { - let ret_val: uint8x8_t = transmute(vld1_v8i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld4_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { - transmute(vld1q_v16i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_dup_s32(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { - let ret_val: uint8x16_t = transmute(vld1q_v16i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { + let mut ret_val: uint32x4x4_t = transmute(vld4q_dup_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { - transmute(vld1_v4i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { - let ret_val: uint16x4_t = transmute(vld1_v4i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld4_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { - transmute(vld1q_v8i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { + transmute(vld4q_dup_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { - let ret_val: uint16x8_t = transmute(vld1q_v8i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { - transmute(vld1_v2i32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { + transmute(vld4_dup_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { - let ret_val: uint32x2_t = transmute(vld1_v2i32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld4_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { - transmute(vld1q_v4i32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { + transmute(vld4q_dup_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { - let ret_val: uint32x4_t = transmute(vld1q_v4i32( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { - transmute(vld1_v1i64( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2f32.p0" + )] + fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; + } + _vld4_f32(a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { - transmute(vld1q_v2i64( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4f32.p0" + )] + fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; + } + _vld4q_f32(a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { - let ret_val: uint64x2_t = transmute(vld1q_v2i64( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8i8.p0" + )] + fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; + } + _vld4_s8(a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { - transmute(vld1_v8i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v16i8.p0" + )] + fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; + } + _vld4q_s8(a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { - let ret_val: poly8x8_t = transmute(vld1_v8i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i16.p0" + )] + fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; + } + _vld4_s16(a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v8i16.p0" + )] + fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; + } + _vld4q_s16(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v2i32.p0" + )] + fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; + } + _vld4_s32(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v4i32.p0" + )] + fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; + } + _vld4q_s32(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { - transmute(vld1q_v16i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] + fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + } + _vld4_f32(a as *const i8, 4) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { - let ret_val: poly8x16_t = transmute(vld1q_v16i8( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] + fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + _vld4q_f32(a as *const i8, 4) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { - transmute(vld1_v4i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] + fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + _vld4_s8(a as *const i8, 1) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { - let ret_val: poly16x4_t = transmute(vld1_v4i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] + fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + _vld4q_s8(a as *const i8, 1) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { - transmute(vld1q_v8i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] + fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + _vld4_s16(a as *const i8, 2) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { - let ret_val: poly16x8_t = transmute(vld1q_v8i16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] + fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + _vld4q_s16(a as *const i8, 2) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,aes")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { - transmute(vld1q_v2i64( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] + fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + _vld4_s32(a as *const i8, 4) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,aes")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { - let ret_val: poly64x2_t = transmute(vld1q_v2i64( - ptr as *const i8, - crate::mem::align_of::() as i32, - )); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[cfg_attr(test, assert_instr(vld4))] +pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] + fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + _vld4q_s32(a as *const i8, 4) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0")] - fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; + fn _vld4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x4_t; } - _vld1_f32_x2(a) + _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0")] - fn _vld1_f32_x2(a: *const f32) -> float32x2x2_t; + fn _vld4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x4_t; } - let mut ret_val: float32x2x2_t = _vld1_f32_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val + _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0")] - fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; + fn _vld4_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x4_t; } - _vld1_f32_x3(a) + _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0")] - fn _vld1_f32_x3(a: *const f32) -> float32x2x3_t; + fn _vld4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x4_t; } - let mut ret_val: float32x2x3_t = _vld1_f32_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val + _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0")] - fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; + fn _vld4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x4_t; } - _vld1_f32_x4(a) + _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0")] - fn _vld1_f32_x4(a: *const f32) -> float32x2x4_t; + fn _vld4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x4_t; } - let mut ret_val: float32x2x4_t = _vld1_f32_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val + _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0" + link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0")] - fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; + fn _vld4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x4_t; } - _vld1q_f32_x2(a) + _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0")] - fn _vld1q_f32_x2(a: *const f32) -> float32x4x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] + fn _vld4_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x4_t; } - let mut ret_val: float32x4x2_t = _vld1q_f32_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val + _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0")] - fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] + fn _vld4q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x4_t; } - _vld1q_f32_x3(a) + _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] + fn _vld4_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x4_t; + } + _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] + fn _vld4_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x4_t; + } + _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] + fn _vld4q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x4_t; + } + _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] + fn _vld4_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x4_t; + } + _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] + fn _vld4q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x4_t; + } + _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20843,34 +19897,23 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0")] - fn _vld1q_f32_x3(a: *const f32) -> float32x4x3_t; - } - let mut ret_val: float32x4x3_t = _vld1q_f32_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val +pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20879,30 +19922,23 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0")] - fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; - } - _vld1q_f32_x4(a) +pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20911,52 +19947,23 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0")] - fn _vld1q_f32_x4(a: *const f32) -> float32x4x4_t; - } - let mut ret_val: float32x4x4_t = _vld1q_f32_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { - let a: *const i8 = ptr as *const i8; - let b: i32 = crate::mem::align_of::() as i32; - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] - fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; - } - transmute(_vld1_v1i64(a, b)) +pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20965,21 +19972,23 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { - transmute(vld1_s64_x2(transmute(a))) +pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -20988,21 +19997,23 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { - transmute(vld1_s64_x3(transmute(a))) +pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21011,22 +20022,23 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { - transmute(vld1_s64_x4(transmute(a))) +pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21035,22 +20047,23 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { - transmute(vld1q_s64_x2(transmute(a))) +pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21059,24 +20072,21 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { - let mut ret_val: poly64x2x2_t = transmute(vld1q_s64_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val +pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -21086,21 +20096,55 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { - transmute(vld1q_s64_x3(transmute(a))) +pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_s64(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v1i64.p0" + )] + fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; + } + _vld4_s64(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] + fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; + } + _vld4_s64(a as *const i8, 8) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -21110,25 +20154,21 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { - let mut ret_val: poly64x2x3_t = transmute(vld1q_s64_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val +pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_s64(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21138,21 +20178,21 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { - transmute(vld1q_s64_x4(transmute(a))) +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21162,231 +20202,26 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { - let mut ret_val: poly64x2x4_t = transmute(vld1q_s64_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + let mut ret_val: uint8x8x4_t = transmute(vld4_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { - vld1_v8i8(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { - let ret_val: int8x8_t = vld1_v8i8(ptr as *const i8, crate::mem::align_of::() as i32); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { - vld1q_v16i8(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { - let ret_val: int8x16_t = vld1q_v16i8(ptr as *const i8, crate::mem::align_of::() as i32); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { - vld1_v4i16(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { - let ret_val: int16x4_t = vld1_v4i16(ptr as *const i8, crate::mem::align_of::() as i32); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { - vld1q_v8i16(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { - let ret_val: int16x8_t = vld1q_v8i16(ptr as *const i8, crate::mem::align_of::() as i32); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { - vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { - let ret_val: int32x2_t = vld1_v2i32(ptr as *const i8, crate::mem::align_of::() as i32); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { - vld1q_v4i32(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { - let ret_val: int32x4_t = vld1q_v4i32(ptr as *const i8, crate::mem::align_of::() as i32); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { - vld1_v1i64(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { - vld1q_v2i64(ptr as *const i8, crate::mem::align_of::() as i32) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { - let ret_val: int64x2_t = vld1q_v2i64(ptr as *const i8, crate::mem::align_of::() as i32); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21396,29 +20231,21 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0")] - fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; - } - _vld1_s8_x2(a) +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21428,32 +20255,42 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0")] - fn _vld1_s8_x2(a: *const i8) -> int8x8x2_t; - } - let mut ret_val: int8x8x2_t = _vld1_s8_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + let mut ret_val: uint8x16x4_t = transmute(vld4q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21463,29 +20300,21 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0")] - fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; - } - _vld1_s8_x3(a) +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21495,33 +20324,26 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0")] - fn _vld1_s8_x3(a: *const i8) -> int8x8x3_t; - } - let mut ret_val: int8x8x3_t = _vld1_s8_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + let mut ret_val: uint16x4x4_t = transmute(vld4_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21531,29 +20353,21 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0")] - fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; - } - _vld1_s8_x4(a) +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21563,34 +20377,26 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0")] - fn _vld1_s8_x4(a: *const i8) -> int8x8x4_t; - } - let mut ret_val: int8x8x4_t = _vld1_s8_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + let mut ret_val: uint16x8x4_t = transmute(vld4q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21600,29 +20406,21 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0")] - fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; - } - _vld1q_s8_x2(a) +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_s32(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21632,40 +20430,26 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0")] - fn _vld1q_s8_x2(a: *const i8) -> int8x16x2_t; - } - let mut ret_val: int8x16x2_t = _vld1q_s8_x2(a); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + let mut ret_val: uint32x2x4_t = transmute(vld4_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21675,29 +20459,21 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0")] - fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; - } - _vld1q_s8_x3(a) +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_s32(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21707,45 +20483,26 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0")] - fn _vld1q_s8_x3(a: *const i8) -> int8x16x3_t; - } - let mut ret_val: int8x16x3_t = _vld1q_s8_x3(a); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + let mut ret_val: uint32x4x4_t = transmute(vld4q_s32(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21755,29 +20512,21 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0")] - fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; - } - _vld1q_s8_x4(a) +pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21787,50 +20536,26 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0")] - fn _vld1q_s8_x4(a: *const i8) -> int8x16x4_t; - } - let mut ret_val: int8x16x4_t = _vld1q_s8_x4(a); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); +pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { + let mut ret_val: poly8x8x4_t = transmute(vld4_s8(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21840,29 +20565,21 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0")] - fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; - } - _vld1_s16_x2(a) +pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { + transmute(vld4q_s8(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21872,32 +20589,42 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0")] - fn _vld1_s16_x2(a: *const i16) -> int16x4x2_t; - } - let mut ret_val: int16x4x2_t = _vld1_s16_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); +pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { + let mut ret_val: poly8x16x4_t = transmute(vld4q_s8(transmute(a))); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21907,29 +20634,21 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0")] - fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; - } - _vld1_s16_x3(a) +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + transmute(vld4_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21939,33 +20658,26 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0")] - fn _vld1_s16_x3(a: *const i16) -> int16x4x3_t; - } - let mut ret_val: int16x4x3_t = _vld1_s16_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + let mut ret_val: poly16x4x4_t = transmute(vld4_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -21975,29 +20687,21 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0")] - fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; - } - _vld1_s16_x4(a) +pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { + transmute(vld4q_s16(transmute(a))) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -22007,34 +20711,25 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0")] - fn _vld1_s16_x4(a: *const i16) -> int16x4x4_t; - } - let mut ret_val: int16x4x4_t = _vld1_s16_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); +pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { + let mut ret_val: poly16x8x4_t = transmute(vld4q_s16(transmute(a))); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22044,29 +20739,28 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { +pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0" + link_name = "llvm.aarch64.neon.fmax.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0")] - fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; + fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - _vld1q_s16_x2(a) + _vmax_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22076,32 +20770,28 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { +pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0" + link_name = "llvm.aarch64.neon.fmax.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0")] - fn _vld1q_s16_x2(a: *const i16) -> int16x8x2_t; + fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - let mut ret_val: int16x8x2_t = _vld1q_s16_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val + _vmaxq_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22111,29 +20801,28 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { +pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0" + link_name = "llvm.aarch64.neon.smax.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0")] - fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; + fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vld1q_s16_x3(a) + _vmax_s8(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22143,33 +20832,28 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { +pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0" + link_name = "llvm.aarch64.neon.smax.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0")] - fn _vld1q_s16_x3(a: *const i16) -> int16x8x3_t; + fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - let mut ret_val: int16x8x3_t = _vld1q_s16_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val + _vmaxq_s8(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22179,29 +20863,28 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { +pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0" + link_name = "llvm.aarch64.neon.smax.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0")] - fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; + fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vld1q_s16_x4(a) + _vmax_s16(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22211,34 +20894,28 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { +pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0" + link_name = "llvm.aarch64.neon.smax.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0")] - fn _vld1q_s16_x4(a: *const i16) -> int16x8x4_t; + fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - let mut ret_val: int16x8x4_t = _vld1q_s16_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val + _vmaxq_s16(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22248,29 +20925,28 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { +pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0" + link_name = "llvm.aarch64.neon.smax.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0")] - fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; + fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - _vld1_s32_x2(a) + _vmax_s32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22280,32 +20956,28 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { +pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0" + link_name = "llvm.aarch64.neon.smax.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0")] - fn _vld1_s32_x2(a: *const i32) -> int32x2x2_t; + fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - let mut ret_val: int32x2x2_t = _vld1_s32_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val + _vmaxq_s32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22315,29 +20987,28 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { +pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0" + link_name = "llvm.aarch64.neon.umax.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0")] - fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; + fn _vmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vld1_s32_x3(a) + _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22347,33 +21018,28 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { +pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0" + link_name = "llvm.aarch64.neon.umax.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0")] - fn _vld1_s32_x3(a: *const i32) -> int32x2x3_t; + fn _vmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - let mut ret_val: int32x2x3_t = _vld1_s32_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val + _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22383,29 +21049,28 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { +pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0" + link_name = "llvm.aarch64.neon.umax.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0")] - fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; + fn _vmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vld1_s32_x4(a) + _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22415,34 +21080,28 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { +pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0" + link_name = "llvm.aarch64.neon.umax.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0")] - fn _vld1_s32_x4(a: *const i32) -> int32x2x4_t; + fn _vmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - let mut ret_val: int32x2x4_t = _vld1_s32_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val + _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22452,29 +21111,28 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { +pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0" + link_name = "llvm.aarch64.neon.umax.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0")] - fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; + fn _vmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - _vld1q_s32_x2(a) + _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umax) )] #[cfg_attr( not(target_arch = "arm"), @@ -22484,32 +21142,28 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { +pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0" + link_name = "llvm.aarch64.neon.umax.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0")] - fn _vld1q_s32_x2(a: *const i32) -> int32x4x2_t; + fn _vmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - let mut ret_val: int32x4x2_t = _vld1q_s32_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val + _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmaxnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -22519,29 +21173,28 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { +pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0" + link_name = "llvm.aarch64.neon.fmaxnm.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0")] - fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; + fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - _vld1q_s32_x3(a) + _vmaxnm_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmaxnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -22551,33 +21204,28 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { +pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0" + link_name = "llvm.aarch64.neon.fmaxnm.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0")] - fn _vld1q_s32_x3(a: *const i32) -> int32x4x3_t; + fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - let mut ret_val: int32x4x3_t = _vld1q_s32_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val + _vmaxnmq_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22587,29 +21235,28 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { +pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0" + link_name = "llvm.aarch64.neon.fmin.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0")] - fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; + fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - _vld1q_s32_x4(a) + _vmin_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22619,33 +21266,28 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { +pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0" + link_name = "llvm.aarch64.neon.fmin.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0")] - fn _vld1q_s32_x4(a: *const i32) -> int32x4x4_t; + fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - let mut ret_val: int32x4x4_t = _vld1q_s32_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val + _vminq_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22655,28 +21297,28 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { +pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0" + link_name = "llvm.aarch64.neon.smin.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0")] - fn _vld1_s64_x2(a: *const i64) -> int64x1x2_t; + fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vld1_s64_x2(a) + _vmin_s8(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22686,28 +21328,28 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { +pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0" + link_name = "llvm.aarch64.neon.smin.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0")] - fn _vld1_s64_x3(a: *const i64) -> int64x1x3_t; + fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - _vld1_s64_x3(a) + _vminq_s8(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22717,29 +21359,28 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { +pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0" + link_name = "llvm.aarch64.neon.smin.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0")] - fn _vld1_s64_x4(a: *const i64) -> int64x1x4_t; + fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vld1_s64_x4(a) + _vmin_s16(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22749,29 +21390,28 @@ pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { +pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0" + link_name = "llvm.aarch64.neon.smin.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0")] - fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; + fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vld1q_s64_x2(a) + _vminq_s16(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22781,32 +21421,28 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { +pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0" + link_name = "llvm.aarch64.neon.smin.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0")] - fn _vld1q_s64_x2(a: *const i64) -> int64x2x2_t; + fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - let mut ret_val: int64x2x2_t = _vld1q_s64_x2(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val + _vmin_s32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22816,29 +21452,28 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { +pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0" + link_name = "llvm.aarch64.neon.smin.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0")] - fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; + fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vld1q_s64_x3(a) + _vminq_s32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22848,33 +21483,28 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { +pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0" + link_name = "llvm.aarch64.neon.umin.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0")] - fn _vld1q_s64_x3(a: *const i64) -> int64x2x3_t; + fn _vmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - let mut ret_val: int64x2x3_t = _vld1q_s64_x3(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val + _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22884,29 +21514,28 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { +pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0" + link_name = "llvm.aarch64.neon.umin.v16i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0")] - fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; + fn _vminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - _vld1q_s64_x4(a) + _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22916,34 +21545,28 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { +pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0" + link_name = "llvm.aarch64.neon.umin.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0")] - fn _vld1q_s64_x4(a: *const i64) -> int64x2x4_t; + fn _vmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - let mut ret_val: int64x2x4_t = _vld1q_s64_x4(a); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val + _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22953,21 +21576,28 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { - transmute(vld1_s8_x2(transmute(a))) +pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v8i16" + )] + fn _vminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -22977,24 +21607,28 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { - let mut ret_val: uint8x8x2_t = transmute(vld1_s8_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v2i32" + )] + fn _vmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -23004,21 +21638,28 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { - transmute(vld1_s8_x3(transmute(a))) +pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umin.v4i32" + )] + fn _vminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fminnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -23028,25 +21669,28 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { - let mut ret_val: uint8x8x3_t = transmute(vld1_s8_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v2f32" + )] + fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vminnm_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fminnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -23056,21 +21700,28 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { - transmute(vld1_s8_x4(transmute(a))) +pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v4f32" + )] + fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vminnmq_f32(a, b) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -23080,26 +21731,20 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { - let mut ret_val: uint8x8x4_t = transmute(vld1_s8_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -23109,22 +21754,22 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { - transmute(vld1q_s8_x2(transmute(a))) +pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23133,33 +21778,27 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { - let mut ret_val: uint8x16x2_t = transmute(vld1q_s8_x2(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val +pub unsafe fn vmla_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23168,22 +21807,27 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { - transmute(vld1q_s8_x3(transmute(a))) +pub unsafe fn vmla_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23192,38 +21836,31 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { - let mut ret_val: uint8x16x3_t = transmute(vld1q_s8_x3(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val +pub unsafe fn vmlaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23232,22 +21869,31 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { - transmute(vld1q_s8_x4(transmute(a))) +pub unsafe fn vmlaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23256,43 +21902,31 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { - let mut ret_val: uint8x16x4_t = transmute(vld1q_s8_x4(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val +pub unsafe fn vmla_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23301,22 +21935,31 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { - transmute(vld1_s16_x2(transmute(a))) +pub unsafe fn vmla_lane_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23325,25 +21968,31 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { - let mut ret_val: uint16x4x2_t = transmute(vld1_s16_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmla_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmla_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23352,22 +22001,31 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { - transmute(vld1_s16_x3(transmute(a))) +pub unsafe fn vmla_laneq_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmla_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23376,26 +22034,44 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { - let mut ret_val: uint16x4x3_t = transmute(vld1_s16_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmlaq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23404,22 +22080,44 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { - transmute(vld1_s16_x4(transmute(a))) +pub unsafe fn vmlaq_lane_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23428,27 +22126,44 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { - let mut ret_val: uint16x4x4_t = transmute(vld1_s16_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmlaq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlaq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23457,22 +22172,44 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { - transmute(vld1q_s16_x2(transmute(a))) +pub unsafe fn vmlaq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlaq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23481,25 +22218,27 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { - let mut ret_val: uint16x8x2_t = transmute(vld1q_s16_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmla_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23508,22 +22247,27 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { - transmute(vld1q_s16_x3(transmute(a))) +pub unsafe fn vmla_lane_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23532,26 +22276,27 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { - let mut ret_val: uint16x8x3_t = transmute(vld1q_s16_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmla_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23560,22 +22305,27 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { - transmute(vld1q_s16_x4(transmute(a))) +pub unsafe fn vmla_laneq_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23584,27 +22334,31 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { - let mut ret_val: uint16x8x4_t = transmute(vld1q_s16_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmlaq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23613,22 +22367,31 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { - transmute(vld1_s32_x2(transmute(a))) +pub unsafe fn vmlaq_lane_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23637,25 +22400,31 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { - let mut ret_val: uint32x2x2_t = transmute(vld1_s32_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val +pub unsafe fn vmlaq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23664,21 +22433,29 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { - transmute(vld1_s32_x3(transmute(a))) +pub unsafe fn vmlaq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -23688,25 +22465,20 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { - let mut ret_val: uint32x2x3_t = transmute(vld1_s32_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val +pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmla_f32(a, b, vdup_n_f32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -23716,21 +22488,20 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { - transmute(vld1_s32_x4(transmute(a))) +pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlaq_f32(a, b, vdupq_n_f32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23740,26 +22511,20 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { - let mut ret_val: uint32x2x4_t = transmute(vld1_s32_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val +pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmla_s16(a, b, vdup_n_s16(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23769,21 +22534,20 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { - transmute(vld1q_s32_x2(transmute(a))) +pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + vmlaq_s16(a, b, vdupq_n_s16(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23793,24 +22557,20 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { - let mut ret_val: uint32x4x2_t = transmute(vld1q_s32_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + vmla_u16(a, b, vdup_n_u16(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23820,21 +22580,20 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { - transmute(vld1q_s32_x3(transmute(a))) +pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + vmlaq_u16(a, b, vdupq_n_u16(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23844,25 +22603,20 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { - let mut ret_val: uint32x4x3_t = transmute(vld1q_s32_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + vmla_s32(a, b, vdup_n_s32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23872,21 +22626,20 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { - transmute(vld1q_s32_x4(transmute(a))) +pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + vmlaq_s32(a, b, vdupq_n_s32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23896,25 +22649,20 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { - let mut ret_val: uint32x4x4_t = transmute(vld1q_s32_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + vmla_u32(a, b, vdup_n_u32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23924,20 +22672,20 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { - transmute(vld1_s64_x2(transmute(a))) +pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + vmlaq_u32(a, b, vdupq_n_u32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23947,20 +22695,20 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { - transmute(vld1_s64_x3(transmute(a))) +pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23970,21 +22718,20 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { - transmute(vld1_s64_x4(transmute(a))) +pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -23994,21 +22741,20 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { - transmute(vld1q_s64_x2(transmute(a))) +pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24018,24 +22764,20 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { - let mut ret_val: uint64x2x2_t = transmute(vld1q_s64_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val +pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24045,21 +22787,20 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { - transmute(vld1q_s64_x3(transmute(a))) +pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24069,25 +22810,20 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { - let mut ret_val: uint64x2x3_t = transmute(vld1q_s64_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val +pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24097,21 +22833,20 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { - transmute(vld1q_s64_x4(transmute(a))) +pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24121,26 +22856,20 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { - let mut ret_val: uint64x2x4_t = transmute(vld1q_s64_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val +pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24150,21 +22879,20 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { - transmute(vld1_s8_x2(transmute(a))) +pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24174,24 +22902,20 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { - let mut ret_val: poly8x8x2_t = transmute(vld1_s8_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24201,21 +22925,20 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { - transmute(vld1_s8_x3(transmute(a))) +pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -24225,26 +22948,22 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { - let mut ret_val: poly8x8x3_t = transmute(vld1_s8_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + simd_add(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24253,22 +22972,31 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { - transmute(vld1_s8_x4(transmute(a))) +pub unsafe fn vmlal_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24277,27 +23005,31 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { - let mut ret_val: poly8x8x4_t = transmute(vld1_s8_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmlal_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24306,22 +23038,27 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { - transmute(vld1q_s8_x2(transmute(a))) +pub unsafe fn vmlal_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24330,33 +23067,27 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { - let mut ret_val: poly8x16x2_t = transmute(vld1q_s8_x2(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val +pub unsafe fn vmlal_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24365,22 +23096,31 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { - transmute(vld1q_s8_x3(transmute(a))) +pub unsafe fn vmlal_lane_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24389,38 +23129,31 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { - let mut ret_val: poly8x16x3_t = transmute(vld1q_s8_x3(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val +pub unsafe fn vmlal_laneq_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24429,22 +23162,27 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { - transmute(vld1q_s8_x4(transmute(a))) +pub unsafe fn vmlal_lane_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24453,42 +23191,25 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { - let mut ret_val: poly8x16x4_t = transmute(vld1q_s8_x4(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val +pub unsafe fn vmlal_laneq_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24498,21 +23219,20 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { - transmute(vld1_s16_x2(transmute(a))) +pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vmlal_s16(a, b, vdup_n_s16(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24522,24 +23242,20 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { - let mut ret_val: poly16x4x2_t = transmute(vld1_s16_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vmlal_s32(a, b, vdup_n_s32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24549,21 +23265,20 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { - transmute(vld1_s16_x3(transmute(a))) +pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + vmlal_u16(a, b, vdup_n_u16(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24573,25 +23288,20 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { - let mut ret_val: poly16x4x3_t = transmute(vld1_s16_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + vmlal_u32(a, b, vdup_n_u32(c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24601,21 +23311,20 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { - transmute(vld1_s16_x4(transmute(a))) +pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + simd_add(a, vmull_s8(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24625,26 +23334,20 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { - let mut ret_val: poly16x4x4_t = transmute(vld1_s16_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val +pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + simd_add(a, vmull_s16(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24654,21 +23357,20 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { - transmute(vld1q_s16_x2(transmute(a))) +pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + simd_add(a, vmull_s32(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24678,24 +23380,20 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { - let mut ret_val: poly16x8x2_t = transmute(vld1q_s16_x2(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + simd_add(a, vmull_u8(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24705,21 +23403,20 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { - transmute(vld1q_s16_x3(transmute(a))) +pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + simd_add(a, vmull_u16(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -24729,25 +23426,20 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { - let mut ret_val: poly16x8x3_t = transmute(vld1q_s16_x3(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + simd_add(a, vmull_u32(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -24757,21 +23449,20 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { - transmute(vld1q_s16_x4(transmute(a))) +pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -24781,47463 +23472,324 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { - let mut ret_val: poly16x8x4_t = transmute(vld1q_s16_x4(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val +pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v1i64)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v1i64(a: *const i8, b: i32) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] - fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; - } - _vld1_v1i64(a, b) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2f32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] - fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; - } - _vld1_v2f32(a, b) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2f32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v2f32(a: *const i8, b: i32) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] - fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; - } - let ret_val: float32x2_t = _vld1_v2f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2i32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] - fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; - } - _vld1_v2i32(a, b) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_f32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v2i32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v2i32(a: *const i8, b: i32) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] - fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; - } - let ret_val: int32x2_t = _vld1_v2i32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v4i16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] - fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; - } - _vld1_v4i16(a, b) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_lane_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v4i16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v4i16(a: *const i8, b: i32) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] - fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; - } - let ret_val: int16x4_t = _vld1_v4i16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmls_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v8i8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] - fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; - } - _vld1_v8i8(a, b) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmls_laneq_u16( + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmls_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_v8i8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v8i8(a: *const i8, b: i32) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] - fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; - } - let ret_val: int8x8_t = _vld1_v8i8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(mls, LANE = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vmlsq_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v16i8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] - fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; - } - _vld1q_v16i8(a, b) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v16i8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] - fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; - } - let ret_val: int8x16_t = _vld1q_v16i8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v2i64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] - fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; - } - _vld1q_v2i64(a, b) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v2i64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] - fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; - } - let ret_val: int64x2_t = _vld1q_v2i64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] - fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; - } - _vld1q_v4f32(a, b) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] - fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; - } - let ret_val: float32x4_t = _vld1q_v4f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4i32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] - fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; - } - _vld1q_v4i32(a, b) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v4i32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] - fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; - } - let ret_val: int32x4_t = _vld1q_v4i32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v8i16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] - fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; - } - _vld1q_v8i16(a, b) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_v8i16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] - fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; - } - let ret_val: int16x8_t = _vld1q_v8i16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] - fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; - } - _vld2_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] - fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; - } - let mut ret_val: float32x2x2_t = _vld2_dup_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] - fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; - } - _vld2q_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] - fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; - } - let mut ret_val: float32x4x2_t = _vld2q_dup_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] - fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; - } - _vld2_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] - fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; - } - let mut ret_val: int8x8x2_t = _vld2_dup_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] - fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; - } - _vld2q_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] - fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; - } - let mut ret_val: int8x16x2_t = _vld2q_dup_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] - fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; - } - _vld2_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] - fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; - } - let mut ret_val: int16x4x2_t = _vld2_dup_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] - fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; - } - _vld2q_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] - fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; - } - let mut ret_val: int16x8x2_t = _vld2q_dup_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] - fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; - } - _vld2_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] - fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; - } - let mut ret_val: int32x2x2_t = _vld2_dup_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] - fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; - } - _vld2q_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] - fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; - } - let mut ret_val: int32x4x2_t = _vld2q_dup_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" - )] - fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; - } - _vld2_dup_f32(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" - )] - fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; - } - let mut ret_val: float32x2x2_t = _vld2_dup_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" - )] - fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; - } - _vld2q_dup_f32(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" - )] - fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; - } - let mut ret_val: float32x4x2_t = _vld2q_dup_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" - )] - fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; - } - _vld2_dup_s8(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" - )] - fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; - } - let mut ret_val: int8x8x2_t = _vld2_dup_s8(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" - )] - fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; - } - _vld2q_dup_s8(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" - )] - fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; - } - let mut ret_val: int8x16x2_t = _vld2q_dup_s8(a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" - )] - fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; - } - _vld2_dup_s16(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" - )] - fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; - } - let mut ret_val: int16x4x2_t = _vld2_dup_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" - )] - fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; - } - _vld2q_dup_s16(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" - )] - fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; - } - let mut ret_val: int16x8x2_t = _vld2q_dup_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" - )] - fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; - } - _vld2_dup_s32(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" - )] - fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; - } - let mut ret_val: int32x2x2_t = _vld2_dup_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" - )] - fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; - } - _vld2q_dup_s32(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" - )] - fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; - } - let mut ret_val: int32x4x2_t = _vld2q_dup_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { - transmute(vld2_dup_s64(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0")] - fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; - } - _vld2_dup_s64(a as *const i8, 8) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v1i64.p0" - )] - fn _vld2_dup_s64(ptr: *const i64) -> int64x1x2_t; - } - _vld2_dup_s64(a as _) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { - transmute(vld2_dup_s64(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { - transmute(vld2_dup_s8(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { - let mut ret_val: uint8x8x2_t = transmute(vld2_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { - transmute(vld2q_dup_s8(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { - let mut ret_val: uint8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { - transmute(vld2_dup_s16(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { - let mut ret_val: uint16x4x2_t = transmute(vld2_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { - transmute(vld2q_dup_s16(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { - let mut ret_val: uint16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { - transmute(vld2_dup_s32(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { - let mut ret_val: uint32x2x2_t = transmute(vld2_dup_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { - transmute(vld2q_dup_s32(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { - let mut ret_val: uint32x4x2_t = transmute(vld2q_dup_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { - transmute(vld2_dup_s8(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { - let mut ret_val: poly8x8x2_t = transmute(vld2_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { - transmute(vld2q_dup_s8(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { - let mut ret_val: poly8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { - transmute(vld2_dup_s16(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { - let mut ret_val: poly16x4x2_t = transmute(vld2_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { - transmute(vld2q_dup_s16(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { - let mut ret_val: poly16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] - fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; - } - _vld2_f32(a as *const i8, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] - fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; - } - let mut ret_val: float32x2x2_t = _vld2_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] - fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; - } - _vld2q_f32(a as *const i8, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] - fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; - } - let mut ret_val: float32x4x2_t = _vld2q_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] - fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; - } - _vld2_s8(a as *const i8, 1) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] - fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; - } - let mut ret_val: int8x8x2_t = _vld2_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] - fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; - } - _vld2q_s8(a as *const i8, 1) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] - fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; - } - let mut ret_val: int8x16x2_t = _vld2q_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] - fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; - } - _vld2_s16(a as *const i8, 2) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] - fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; - } - let mut ret_val: int16x4x2_t = _vld2_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] - fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; - } - _vld2q_s16(a as *const i8, 2) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] - fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; - } - let mut ret_val: int16x8x2_t = _vld2q_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] - fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; - } - _vld2_s32(a as *const i8, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] - fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; - } - let mut ret_val: int32x2x2_t = _vld2_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] - fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; - } - _vld2q_s32(a as *const i8, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] - fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; - } - let mut ret_val: int32x4x2_t = _vld2q_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f32.p0" - )] - fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; - } - _vld2_f32(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f32.p0" - )] - fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; - } - let mut ret_val: float32x2x2_t = _vld2_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4f32.p0" - )] - fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; - } - _vld2q_f32(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4f32.p0" - )] - fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; - } - let mut ret_val: float32x4x2_t = _vld2q_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i8.p0" - )] - fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; - } - _vld2_s8(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i8.p0" - )] - fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; - } - let mut ret_val: int8x8x2_t = _vld2_s8(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v16i8.p0" - )] - fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; - } - _vld2q_s8(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v16i8.p0" - )] - fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; - } - let mut ret_val: int8x16x2_t = _vld2q_s8(a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i16.p0" - )] - fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; - } - _vld2_s16(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i16.p0" - )] - fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; - } - let mut ret_val: int16x4x2_t = _vld2_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i16.p0" - )] - fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; - } - _vld2q_s16(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i16.p0" - )] - fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; - } - let mut ret_val: int16x8x2_t = _vld2q_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i32.p0" - )] - fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; - } - _vld2_s32(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2i32.p0" - )] - fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; - } - let mut ret_val: int32x2x2_t = _vld2_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i32.p0" - )] - fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; - } - _vld2q_s32(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i32.p0" - )] - fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; - } - let mut ret_val: int32x4x2_t = _vld2q_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0" - )] - fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; - } - _vld2_lane_f32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0" - )] - fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t; - } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: float32x2x2_t = _vld2_lane_f32(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" - )] - fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) - -> float32x4x2_t; - } - _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" - )] - fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) - -> float32x4x2_t; - } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: float32x4x2_t = _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" - )] - fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; - } - _vld2_lane_s8(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" - )] - fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; - } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int8x8x2_t = _vld2_lane_s8(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0" - )] - fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; - } - _vld2_lane_s16(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0" - )] - fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t; - } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: int16x4x2_t = _vld2_lane_s16(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0" - )] - fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; - } - _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0" - )] - fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t; - } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int16x8x2_t = _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" - )] - fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; - } - _vld2_lane_s32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" - )] - fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; - } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: int32x2x2_t = _vld2_lane_s32(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" - )] - fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; - } - _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" - )] - fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; - } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: int32x4x2_t = _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] - fn _vld2_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x2_t; - } - _vld2_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] - fn _vld2_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x2_t; - } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: float32x2x2_t = _vld2_lane_f32(a as _, b.0, b.1, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] - fn _vld2q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x2_t; - } - _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] - fn _vld2q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x2_t; - } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: float32x4x2_t = _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] - fn _vld2q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x2_t; - } - _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] - fn _vld2q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x2_t; - } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int16x8x2_t = _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] - fn _vld2q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x2_t; - } - _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] - fn _vld2q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x2_t; - } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: int32x4x2_t = _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] - fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) - -> int8x8x2_t; - } - _vld2_lane_s8(a as _, b.0, b.1, LANE, 1) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] - fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) - -> int8x8x2_t; - } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int8x8x2_t = _vld2_lane_s8(a as _, b.0, b.1, LANE, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] - fn _vld2_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x2_t; - } - _vld2_lane_s16(a as _, b.0, b.1, LANE, 2) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] - fn _vld2_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x2_t; - } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: int16x4x2_t = _vld2_lane_s16(a as _, b.0, b.1, LANE, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] - fn _vld2_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x2_t; - } - _vld2_lane_s32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] - fn _vld2_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x2_t; - } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: int32x2x2_t = _vld2_lane_s32(a as _, b.0, b.1, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: uint8x8x2_t = transmute(vld2_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: uint16x4x2_t = transmute(vld2_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2q_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: uint16x8x2_t = transmute(vld2q_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld2_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - let mut ret_val: uint32x2x2_t = transmute(vld2_lane_s32::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2q_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: uint32x4x2_t = transmute(vld2q_lane_s32::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: poly8x8x2_t = transmute(vld2_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: poly16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - let mut ret_val: poly16x4x2_t = transmute(vld2_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2q_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: poly16x8x2_t = transmute(vld2q_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { - transmute(vld2_s64(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64")] - fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; - } - _vld2_s64(a as *const i8, 8) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v1i64.p0" - )] - fn _vld2_s64(ptr: *const int64x1_t) -> int64x1x2_t; - } - _vld2_s64(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { - transmute(vld2_s64(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { - transmute(vld2_s8(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { - let mut ret_val: uint8x8x2_t = transmute(vld2_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { - transmute(vld2q_s8(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { - let mut ret_val: uint8x16x2_t = transmute(vld2q_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { - transmute(vld2_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { - let mut ret_val: uint16x4x2_t = transmute(vld2_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { - transmute(vld2q_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { - let mut ret_val: uint16x8x2_t = transmute(vld2q_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { - transmute(vld2_s32(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { - let mut ret_val: uint32x2x2_t = transmute(vld2_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { - transmute(vld2q_s32(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { - let mut ret_val: uint32x4x2_t = transmute(vld2q_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { - transmute(vld2_s8(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { - let mut ret_val: poly8x8x2_t = transmute(vld2_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { - transmute(vld2q_s8(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { - let mut ret_val: poly8x16x2_t = transmute(vld2q_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { - transmute(vld2_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { - let mut ret_val: poly16x4x2_t = transmute(vld2_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { - transmute(vld2q_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { - let mut ret_val: poly16x8x2_t = transmute(vld2q_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" - )] - fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; - } - _vld3_dup_f32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" - )] - fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; - } - let mut ret_val: float32x2x3_t = _vld3_dup_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" - )] - fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; - } - _vld3q_dup_f32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" - )] - fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; - } - let mut ret_val: float32x4x3_t = _vld3q_dup_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" - )] - fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; - } - _vld3_dup_s8(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" - )] - fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; - } - let mut ret_val: int8x8x3_t = _vld3_dup_s8(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" - )] - fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; - } - _vld3q_dup_s8(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" - )] - fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; - } - let mut ret_val: int8x16x3_t = _vld3q_dup_s8(a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" - )] - fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; - } - _vld3_dup_s16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" - )] - fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; - } - let mut ret_val: int16x4x3_t = _vld3_dup_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" - )] - fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; - } - _vld3q_dup_s16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" - )] - fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; - } - let mut ret_val: int16x8x3_t = _vld3q_dup_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" - )] - fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; - } - _vld3_dup_s32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" - )] - fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t; - } - let mut ret_val: int32x2x3_t = _vld3_dup_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" - )] - fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; - } - _vld3q_dup_s32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" - )] - fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; - } - let mut ret_val: int32x4x3_t = _vld3q_dup_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v1i64.p0" - )] - fn _vld3_dup_s64(ptr: *const i64) -> int64x1x3_t; - } - _vld3_dup_s64(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] - fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; - } - _vld3_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] - fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; - } - let mut ret_val: float32x2x3_t = _vld3_dup_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] - fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; - } - _vld3q_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] - fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; - } - let mut ret_val: float32x4x3_t = _vld3q_dup_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] - fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - _vld3_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] - fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - let mut ret_val: int8x8x3_t = _vld3_dup_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] - fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; - } - _vld3q_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] - fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; - } - let mut ret_val: int8x16x3_t = _vld3q_dup_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] - fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; - } - _vld3_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] - fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; - } - let mut ret_val: int16x4x3_t = _vld3_dup_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] - fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - _vld3q_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] - fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - let mut ret_val: int16x8x3_t = _vld3q_dup_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] - fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; - } - _vld3_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] - fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; - } - let mut ret_val: int32x2x3_t = _vld3_dup_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] - fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; - } - _vld3q_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] - fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; - } - let mut ret_val: int32x4x3_t = _vld3q_dup_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { - transmute(vld3_dup_s64(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0")] - fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; - } - _vld3_dup_s64(a as *const i8, 8) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { - transmute(vld3_dup_s64(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { - transmute(vld3_dup_s8(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { - let mut ret_val: uint8x8x3_t = transmute(vld3_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_dup_s8(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { - let mut ret_val: uint8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_dup_s16(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { - let mut ret_val: uint16x4x3_t = transmute(vld3_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_dup_s16(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { - let mut ret_val: uint16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_dup_s32(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { - let mut ret_val: uint32x2x3_t = transmute(vld3_dup_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_dup_s32(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { - let mut ret_val: uint32x4x3_t = transmute(vld3q_dup_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { - transmute(vld3_dup_s8(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { - let mut ret_val: poly8x8x3_t = transmute(vld3_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { - transmute(vld3q_dup_s8(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { - let mut ret_val: poly8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { - transmute(vld3_dup_s16(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { - let mut ret_val: poly16x4x3_t = transmute(vld3_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { - transmute(vld3q_dup_s16(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { - let mut ret_val: poly16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f32.p0" - )] - fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; - } - _vld3_f32(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f32.p0" - )] - fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; - } - let mut ret_val: float32x2x3_t = _vld3_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4f32.p0" - )] - fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; - } - _vld3q_f32(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4f32.p0" - )] - fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; - } - let mut ret_val: float32x4x3_t = _vld3q_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i8.p0" - )] - fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; - } - _vld3_s8(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i8.p0" - )] - fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; - } - let mut ret_val: int8x8x3_t = _vld3_s8(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v16i8.p0" - )] - fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; - } - _vld3q_s8(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v16i8.p0" - )] - fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; - } - let mut ret_val: int8x16x3_t = _vld3q_s8(a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i16.p0" - )] - fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; - } - _vld3_s16(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i16.p0" - )] - fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; - } - let mut ret_val: int16x4x3_t = _vld3_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i16.p0" - )] - fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; - } - _vld3q_s16(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i16.p0" - )] - fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; - } - let mut ret_val: int16x8x3_t = _vld3q_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i32.p0" - )] - fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; - } - _vld3_s32(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i32.p0" - )] - fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; - } - let mut ret_val: int32x2x3_t = _vld3_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i32.p0" - )] - fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; - } - _vld3q_s32(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i32.p0" - )] - fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; - } - let mut ret_val: int32x4x3_t = _vld3q_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] - fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; - } - _vld3_f32(a as *const i8, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] - fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; - } - let mut ret_val: float32x2x3_t = _vld3_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] - fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; - } - _vld3q_f32(a as *const i8, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] - fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; - } - let mut ret_val: float32x4x3_t = _vld3q_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] - fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - _vld3_s8(a as *const i8, 1) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] - fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - let mut ret_val: int8x8x3_t = _vld3_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] - fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; - } - _vld3q_s8(a as *const i8, 1) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] - fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; - } - let mut ret_val: int8x16x3_t = _vld3q_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] - fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; - } - _vld3_s16(a as *const i8, 2) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] - fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; - } - let mut ret_val: int16x4x3_t = _vld3_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] - fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - _vld3q_s16(a as *const i8, 2) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] - fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - let mut ret_val: int16x8x3_t = _vld3q_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] - fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; - } - _vld3_s32(a as *const i8, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] - fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; - } - let mut ret_val: int32x2x3_t = _vld3_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] - fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; - } - _vld3q_s32(a as *const i8, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] - fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; - } - let mut ret_val: int32x4x3_t = _vld3q_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" - )] - fn _vld3_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x3_t; - } - _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" - )] - fn _vld3_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x3_t; - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: float32x2x3_t = _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" - )] - fn _vld3q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x3_t; - } - _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" - )] - fn _vld3q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x3_t; - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: float32x4x3_t = _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] - fn _vld3_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x3_t; - } - _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] - fn _vld3_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x3_t; - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: float32x2x3_t = _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" - )] - fn _vld3_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x3_t; - } - _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" - )] - fn _vld3_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x3_t; - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int8x8x3_t = _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" - )] - fn _vld3_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x3_t; - } - _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" - )] - fn _vld3_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x3_t; - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: int16x4x3_t = _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" - )] - fn _vld3q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x3_t; - } - _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" - )] - fn _vld3q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x3_t; - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int16x8x3_t = _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" - )] - fn _vld3_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x3_t; - } - _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" - )] - fn _vld3_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x3_t; - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: int32x2x3_t = _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" - )] - fn _vld3q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x3_t; - } - _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" - )] - fn _vld3q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x3_t; - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: int32x4x3_t = _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] - fn _vld3_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x3_t; - } - _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] - fn _vld3_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x3_t; - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int8x8x3_t = _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] - fn _vld3_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x3_t; - } - _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] - fn _vld3_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x3_t; - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: int16x4x3_t = _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] - fn _vld3q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x3_t; - } - _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] - fn _vld3q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x3_t; - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int16x8x3_t = _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] - fn _vld3_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x3_t; - } - _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] - fn _vld3_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x3_t; - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: int32x2x3_t = _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] - fn _vld3q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x3_t; - } - _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] - fn _vld3q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x3_t; - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: int32x4x3_t = _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: uint8x8x3_t = transmute(vld3_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: uint16x4x3_t = transmute(vld3_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: uint16x8x3_t = transmute(vld3q_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - let mut ret_val: uint32x2x3_t = transmute(vld3_lane_s32::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3q_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: uint32x4x3_t = transmute(vld3q_lane_s32::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: poly8x8x3_t = transmute(vld3_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: poly16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: poly16x4x3_t = transmute(vld3_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: poly16x8x3_t = transmute(vld3q_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { - transmute(vld3_s64(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1i64.p0" - )] - fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; - } - _vld3_s64(a as _) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] - fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; - } - _vld3_s64(a as *const i8, 8) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { - transmute(vld3_s64(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { - transmute(vld3_s8(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { - let mut ret_val: uint8x8x3_t = transmute(vld3_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_s8(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - let mut ret_val: uint8x16x3_t = transmute(vld3q_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - let mut ret_val: uint16x4x3_t = transmute(vld3_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - let mut ret_val: uint16x8x3_t = transmute(vld3q_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_s32(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - let mut ret_val: uint32x2x3_t = transmute(vld3_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_s32(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - let mut ret_val: uint32x4x3_t = transmute(vld3q_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { - transmute(vld3_s8(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { - let mut ret_val: poly8x8x3_t = transmute(vld3_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { - transmute(vld3q_s8(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { - let mut ret_val: poly8x16x3_t = transmute(vld3q_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { - transmute(vld3_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { - let mut ret_val: poly16x4x3_t = transmute(vld3_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { - transmute(vld3q_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { - let mut ret_val: poly16x8x3_t = transmute(vld3q_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] - fn _vld3q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x3_t; - } - _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] - fn _vld3q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x3_t; - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - let mut ret_val: float32x4x3_t = _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] - fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - _vld4_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] - fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - let mut ret_val: float32x2x4_t = _vld4_dup_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] - fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; - } - _vld4q_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] - fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; - } - let mut ret_val: float32x4x4_t = _vld4q_dup_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] - fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; - } - _vld4_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] - fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; - } - let mut ret_val: int8x8x4_t = _vld4_dup_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] - fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - _vld4q_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] - fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - let mut ret_val: int8x16x4_t = _vld4q_dup_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] - fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - _vld4_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] - fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - let mut ret_val: int16x4x4_t = _vld4_dup_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] - fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - _vld4q_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] - fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - let mut ret_val: int16x8x4_t = _vld4q_dup_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] - fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; - } - _vld4_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] - fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; - } - let mut ret_val: int32x2x4_t = _vld4_dup_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] - fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; - } - _vld4q_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] - fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; - } - let mut ret_val: int32x4x4_t = _vld4q_dup_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" - )] - fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; - } - _vld4_dup_f32(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32.p0" - )] - fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; - } - let mut ret_val: float32x2x4_t = _vld4_dup_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" - )] - fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; - } - _vld4q_dup_f32(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32.p0" - )] - fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; - } - let mut ret_val: float32x4x4_t = _vld4q_dup_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" - )] - fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; - } - _vld4_dup_s8(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8.p0" - )] - fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; - } - let mut ret_val: int8x8x4_t = _vld4_dup_s8(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" - )] - fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; - } - _vld4q_dup_s8(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8.p0" - )] - fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; - } - let mut ret_val: int8x16x4_t = _vld4q_dup_s8(a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" - )] - fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; - } - _vld4_dup_s16(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16.p0" - )] - fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; - } - let mut ret_val: int16x4x4_t = _vld4_dup_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" - )] - fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; - } - _vld4q_dup_s16(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16.p0" - )] - fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; - } - let mut ret_val: int16x8x4_t = _vld4q_dup_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" - )] - fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; - } - _vld4_dup_s32(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32.p0" - )] - fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; - } - let mut ret_val: int32x2x4_t = _vld4_dup_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" - )] - fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; - } - _vld4q_dup_s32(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32.p0" - )] - fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; - } - let mut ret_val: int32x4x4_t = _vld4q_dup_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64.p0" - )] - fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; - } - _vld4_dup_s64(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(nop))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] - fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; - } - _vld4_dup_s64(a as *const i8, 8) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_dup_s8(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { - let mut ret_val: uint8x8x4_t = transmute(vld4_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_dup_s8(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { - let mut ret_val: uint8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { - let mut ret_val: uint16x4x4_t = transmute(vld4_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { - let mut ret_val: uint16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_dup_s32(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { - let mut ret_val: uint32x2x4_t = transmute(vld4_dup_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_dup_s32(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { - let mut ret_val: uint32x4x4_t = transmute(vld4q_dup_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_dup_s8(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { - let mut ret_val: poly8x8x4_t = transmute(vld4_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_dup_s8(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { - let mut ret_val: poly8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { - let mut ret_val: poly16x4x4_t = transmute(vld4_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { - let mut ret_val: poly16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f32.p0" - )] - fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; - } - _vld4_f32(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f32.p0" - )] - fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; - } - let mut ret_val: float32x2x4_t = _vld4_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f32.p0" - )] - fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; - } - _vld4q_f32(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f32.p0" - )] - fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; - } - let mut ret_val: float32x4x4_t = _vld4q_f32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i8.p0" - )] - fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; - } - _vld4_s8(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i8.p0" - )] - fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; - } - let mut ret_val: int8x8x4_t = _vld4_s8(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v16i8.p0" - )] - fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; - } - _vld4q_s8(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v16i8.p0" - )] - fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; - } - let mut ret_val: int8x16x4_t = _vld4q_s8(a as _); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i16.p0" - )] - fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; - } - _vld4_s16(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i16.p0" - )] - fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; - } - let mut ret_val: int16x4x4_t = _vld4_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i16.p0" - )] - fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; - } - _vld4q_s16(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i16.p0" - )] - fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; - } - let mut ret_val: int16x8x4_t = _vld4q_s16(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i32.p0" - )] - fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; - } - _vld4_s32(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i32.p0" - )] - fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; - } - let mut ret_val: int32x2x4_t = _vld4_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i32.p0" - )] - fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; - } - _vld4q_s32(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i32.p0" - )] - fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; - } - let mut ret_val: int32x4x4_t = _vld4q_s32(a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] - fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - _vld4_f32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] - fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - let mut ret_val: float32x2x4_t = _vld4_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] - fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; - } - _vld4q_f32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] - fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; - } - let mut ret_val: float32x4x4_t = _vld4q_f32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] - fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; - } - _vld4_s8(a as *const i8, 1) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] - fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; - } - let mut ret_val: int8x8x4_t = _vld4_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] - fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - _vld4q_s8(a as *const i8, 1) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] - fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - let mut ret_val: int8x16x4_t = _vld4q_s8(a as *const i8, 1); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] - fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - _vld4_s16(a as *const i8, 2) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] - fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - let mut ret_val: int16x4x4_t = _vld4_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] - fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - _vld4q_s16(a as *const i8, 2) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] - fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - let mut ret_val: int16x8x4_t = _vld4q_s16(a as *const i8, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] - fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; - } - _vld4_s32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")] - fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t; - } - let mut ret_val: int32x2x4_t = _vld4_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] - fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; - } - _vld4q_s32(a as *const i8, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")] - fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t; - } - let mut ret_val: int32x4x4_t = _vld4q_s32(a as *const i8, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" - )] - fn _vld4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x4_t; - } - _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" - )] - fn _vld4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x4_t; - } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: float32x2x4_t = _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" - )] - fn _vld4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x4_t; - } - _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0" - )] - fn _vld4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x4_t; - } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: float32x4x4_t = _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" - )] - fn _vld4_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x4_t; - } - _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" - )] - fn _vld4_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x4_t; - } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int8x8x4_t = _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" - )] - fn _vld4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x4_t; - } - _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" - )] - fn _vld4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x4_t; - } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: int16x4x4_t = _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" - )] - fn _vld4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x4_t; - } - _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" - )] - fn _vld4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x4_t; - } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int16x8x4_t = _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" - )] - fn _vld4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x4_t; - } - _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" - )] - fn _vld4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x4_t; - } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: int32x2x4_t = _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" - )] - fn _vld4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x4_t; - } - _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" - )] - fn _vld4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x4_t; - } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: int32x4x4_t = _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] - fn _vld4_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x4_t; - } - _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] - fn _vld4_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x4_t; - } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: float32x2x4_t = _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] - fn _vld4q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x4_t; - } - _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] - fn _vld4q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x4_t; - } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: float32x4x4_t = _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] - fn _vld4_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x4_t; - } - _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] - fn _vld4_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x4_t; - } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int8x8x4_t = _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] - fn _vld4_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x4_t; - } - _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] - fn _vld4_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x4_t; - } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: int16x4x4_t = _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] - fn _vld4q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x4_t; - } - _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] - fn _vld4q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x4_t; - } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: int16x8x4_t = _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] - fn _vld4_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x4_t; - } - _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] - fn _vld4_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x4_t; - } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: int32x2x4_t = _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] - fn _vld4q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x4_t; - } - _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(vld4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] - fn _vld4q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x4_t; - } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: int32x4x4_t = _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: uint8x8x4_t = transmute(vld4_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: uint16x4x4_t = transmute(vld4_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: uint16x8x4_t = transmute(vld4q_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - let mut ret_val: uint32x2x4_t = transmute(vld4_lane_s32::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4q_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: uint32x4x4_t = transmute(vld4q_lane_s32::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: poly8x8x4_t = transmute(vld4_lane_s8::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - let mut b: poly16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - let mut ret_val: poly16x4x4_t = transmute(vld4_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let mut ret_val: poly16x8x4_t = transmute(vld4q_lane_s16::(transmute(a), transmute(b))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1i64.p0" - )] - fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; - } - _vld4_s64(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] - fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; - } - _vld4_s64(a as *const i8, 8) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_s8(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - let mut ret_val: uint8x8x4_t = transmute(vld4_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_s8(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - let mut ret_val: uint8x16x4_t = transmute(vld4q_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_s16(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - let mut ret_val: uint16x4x4_t = transmute(vld4_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_s16(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - let mut ret_val: uint16x8x4_t = transmute(vld4q_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_s32(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - let mut ret_val: uint32x2x4_t = transmute(vld4_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_s32(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - let mut ret_val: uint32x4x4_t = transmute(vld4q_s32(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_s8(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - let mut ret_val: poly8x8x4_t = transmute(vld4_s8(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_s8(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - let mut ret_val: poly8x16x4_t = transmute(vld4q_s8(transmute(a))); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.2 = simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val.3 = simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_s16(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - let mut ret_val: poly16x4x4_t = transmute(vld4_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_s16(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - let mut ret_val: poly16x8x4_t = transmute(vld4q_s16(transmute(a))); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); - ret_val -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f32" - )] - fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmax_f32(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f32" - )] - fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vmax_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v4f32" - )] - fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vmaxq_f32(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v4f32" - )] - fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vmaxq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i8" - )] - fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmax_s8(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i8" - )] - fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vmax_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v16i8" - )] - fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vmaxq_s8(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v16i8" - )] - fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vmaxq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i16" - )] - fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmax_s16(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i16" - )] - fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vmax_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i16" - )] - fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vmaxq_s16(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i16" - )] - fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vmaxq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v2i32" - )] - fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmax_s32(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v2i32" - )] - fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vmax_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i32" - )] - fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vmaxq_s32(a, b) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i32" - )] - fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vmaxq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i8" - )] - fn _vmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i8" - )] - fn _vmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v16i8" - )] - fn _vmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v16i8" - )] - fn _vmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i16" - )] - fn _vmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i16" - )] - fn _vmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i16" - )] - fn _vmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i16" - )] - fn _vmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v2i32" - )] - fn _vmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v2i32" - )] - fn _vmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i32" - )] - fn _vmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i32" - )] - fn _vmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f32" - )] - fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmaxnm_f32(a, b) -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f32" - )] - fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vmaxnm_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f32" - )] - fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vmaxnmq_f32(a, b) -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f32" - )] - fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vmaxnmq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f32" - )] - fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vmin_f32(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f32" - )] - fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vmin_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v4f32" - )] - fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vminq_f32(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v4f32" - )] - fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vminq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i8" - )] - fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmin_s8(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i8" - )] - fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vmin_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v16i8" - )] - fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vminq_s8(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v16i8" - )] - fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vminq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i16" - )] - fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmin_s16(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i16" - )] - fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vmin_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i16" - )] - fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vminq_s16(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i16" - )] - fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vminq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v2i32" - )] - fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmin_s32(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v2i32" - )] - fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vmin_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i32" - )] - fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vminq_s32(a, b) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i32" - )] - fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vminq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i8" - )] - fn _vmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i8" - )] - fn _vmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v16i8" - )] - fn _vminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v16i8" - )] - fn _vminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i16" - )] - fn _vmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i16" - )] - fn _vmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i16" - )] - fn _vminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i16" - )] - fn _vminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v2i32" - )] - fn _vmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v2i32" - )] - fn _vmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i32" - )] - fn _vminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i32" - )] - fn _vminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f32" - )] - fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vminnm_f32(a, b) -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f32" - )] - fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vminnm_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f32" - )] - fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vminnmq_f32(a, b) -} -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f32" - )] - fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vminnmq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x2_t = vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x4_t = vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = vmlaq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x4_t = vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmla_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint16x4_t = vmla_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = vmla_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmla_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x4_t = vmla_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x8_t = vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint16x8_t = vmlaq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vmlaq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlaq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = vmlaq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x2_t = vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint32x2_t = vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x2_t = vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x2_t = vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlaq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x4_t = vmlaq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint32x4_t = vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlaq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlaq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vmla_f32(a, b, vdup_n_f32(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = vmla_f32(a, b, vdup_n_f32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vmlaq_f32(a, b, vdupq_n_f32(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = vmlaq_f32(a, b, vdupq_n_f32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - vmla_s16(a, b, vdup_n_s16(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = vmla_s16(a, b, vdup_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - vmlaq_s16(a, b, vdupq_n_s16(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vmlaq_s16(a, b, vdupq_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - vmla_u16(a, b, vdup_n_u16(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = vmla_u16(a, b, vdup_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - vmlaq_u16(a, b, vdupq_n_u16(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = vmlaq_u16(a, b, vdupq_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - vmla_s32(a, b, vdup_n_s32(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = vmla_s32(a, b, vdup_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - vmlaq_s32(a, b, vdupq_n_s32(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlaq_s32(a, b, vdupq_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - vmla_u32(a, b, vdup_n_u32(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = vmla_u32(a, b, vdup_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - vmlaq_u32(a, b, vdupq_n_u32(c)) -} -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlaq_u32(a, b, vdupq_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x2_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint32x2_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - simd_add(a, simd_mul(b, c)) -} -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_add(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlal_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmlal_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlal_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlal_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlal_s16(a, b, vdup_n_s16(c)) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlal_s16(a, b, vdup_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlal_s32(a, b, vdup_n_s32(c)) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vmlal_s32(a, b, vdup_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlal_u16(a, b, vdup_n_u16(c)) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlal_u16(a, b, vdup_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlal_u32(a, b, vdup_n_u32(c)) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = vmlal_u32(a, b, vdup_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - simd_add(a, vmull_s8(b, c)) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_add(a, vmull_s8(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - simd_add(a, vmull_s16(b, c)) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_add(a, vmull_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - simd_add(a, vmull_s32(b, c)) -} -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = simd_add(a, vmull_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - simd_add(a, vmull_u8(b, c)) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_add(a, vmull_u8(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - simd_add(a, vmull_u16(b, c)) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_add(a, vmull_u16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - simd_add(a, vmull_u32(b, c)) -} -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = simd_add(a, vmull_u32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x2_t = vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x2_t = vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: float32x4_t = vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: float32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: float32x4_t = vmlsq_f32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x4_t = vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint16x4_t = vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = vmls_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x4_t = vmls_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x8_t = vmlsq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint16x8_t = vmlsq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlsq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, -) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vmlsq_s16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlsq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, -) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = vmlsq_u16( - a, - b, - simd_shuffle!( - c, - c, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x2_t = vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint32x2_t = vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x2_t = vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x2_t = vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x4_t = vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint32x4_t = vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlsq_s32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlsq_u32( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vmls_f32(a, b, vdup_n_f32(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = vmls_f32(a, b, vdup_n_f32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vmlsq_f32(a, b, vdupq_n_f32(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = vmlsq_f32(a, b, vdupq_n_f32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - vmls_s16(a, b, vdup_n_s16(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = vmls_s16(a, b, vdup_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - vmlsq_s16(a, b, vdupq_n_s16(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vmlsq_s16(a, b, vdupq_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - vmls_u16(a, b, vdup_n_u16(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = vmls_u16(a, b, vdup_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - vmlsq_u16(a, b, vdupq_n_u16(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = vmlsq_u16(a, b, vdupq_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - vmls_s32(a, b, vdup_n_s32(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = vmls_s32(a, b, vdup_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - vmlsq_s32(a, b, vdupq_n_s32(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlsq_s32(a, b, vdupq_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - vmls_u32(a, b, vdup_n_u32(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = vmls_u32(a, b, vdup_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - vmlsq_u32(a, b, vdupq_n_u32(c)) -} -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlsq_u32(a, b, vdupq_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x2_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint32x2_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - simd_sub(a, simd_mul(b, c)) -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_sub(a, simd_mul(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmlsl_s16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x4_t, -) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, -) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmlsl_u16( - a, - b, - simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, -) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlsl_s16(a, b, vdup_n_s16(c)) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmlsl_s16(a, b, vdup_n_s16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlsl_s32(a, b, vdup_n_s32(c)) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vmlsl_s32(a, b, vdup_n_s32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlsl_u16(a, b, vdup_n_u16(c)) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmlsl_u16(a, b, vdup_n_u16(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlsl_u32(a, b, vdup_n_u32(c)) -} -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = vmlsl_u32(a, b, vdup_n_u32(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - simd_sub(a, vmull_s8(b, c)) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_sub(a, vmull_s8(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - simd_sub(a, vmull_s16(b, c)) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_sub(a, vmull_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - simd_sub(a, vmull_s32(b, c)) -} -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = simd_sub(a, vmull_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - simd_sub(a, vmull_u8(b, c)) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_sub(a, vmull_u8(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - simd_sub(a, vmull_u16(b, c)) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_sub(a, vmull_u16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - simd_sub(a, vmull_u32(b, c)) -} -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: uint64x2_t = simd_sub(a, vmull_u32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] - fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vmmlaq_s32(a, b, c) -} -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smmla) -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] - fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int32x4_t = _vmmlaq_s32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ummla) -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] - fn _vmmlaq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() -} -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ummla) -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] - fn _vmmlaq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint32x4_t = - _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x8_t = simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x8_t = simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_mul( - a, - simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x2_t = simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_mul( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { - simd_mul(a, vdup_n_f32(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_mul(a, vdup_n_f32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { - simd_mul(a, vdupq_n_f32(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_mul(a, vdupq_n_f32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - simd_mul(a, vdup_n_s16(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_mul(a, vdup_n_s16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - simd_mul(a, vdupq_n_s16(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_mul(a, vdupq_n_s16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - simd_mul(a, vdup_n_s32(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = simd_mul(a, vdup_n_s32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - simd_mul(a, vdupq_n_s32(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_mul(a, vdupq_n_s32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { - simd_mul(a, vdup_n_u16(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_mul(a, vdup_n_u16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { - simd_mul(a, vdupq_n_u16(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_mul(a, vdupq_n_u16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { - simd_mul(a, vdup_n_u32(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = simd_mul(a, vdup_n_u32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { - simd_mul(a, vdupq_n_u32(b)) -} -#[doc = "Vector multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_mul(a, vdupq_n_u32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Polynomial multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmul.v8i8" - )] - fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - } - _vmul_p8(a, b) -} -#[doc = "Polynomial multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmul.v8i8" - )] - fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - } - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = _vmul_p8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Polynomial multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmul.v16i8" - )] - fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - } - _vmulq_p8(a, b) -} -#[doc = "Polynomial multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmul.v16i8" - )] - fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - } - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = _vmulq_p8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_mul(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_mul(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_mul(a, b) -} -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_mul(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_s16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmull_s16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_s16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = vmull_s16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int64x2_t = vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmull_u16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmull_u16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmull_u16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = vmull_u16( - a, - simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) -} -#[doc = "Vector long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint64x2_t = vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { - vmull_s16(a, vdup_n_s16(b)) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = vmull_s16(a, vdup_n_s16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { - vmull_s32(a, vdup_n_s32(b)) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = vmull_s32(a, vdup_n_s32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { - vmull_u16(a, vdup_n_u16(b)) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vmull_u16(a, vdup_n_u16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { - vmull_u32(a, vdup_n_u32(b)) -} -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = vmull_u32(a, vdup_n_u32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i16")] - fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; - } - _vmull_p8(a, b) -} -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i16")] - fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; - } - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = _vmull_p8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")] - fn _vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - _vmull_s16(a, b) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")] - fn _vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")] - fn _vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - _vmull_s32(a, b) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")] - fn _vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")] - fn _vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - } - _vmull_s8(a, b) -} -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")] - fn _vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vmull_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] - fn _vmull_u8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - } - _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] - fn _vmull_u8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] - fn _vmull_u16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] - fn _vmull_u16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] - fn _vmull_u32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] - fn _vmull_u32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_neg(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { - simd_neg(a) -} -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_neg(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_or(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_or(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_or(a, b) -} -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_or(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { - let x: int16x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_s8(b), a); - }; - x -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x: int16x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_s8(b), a); - }; - let ret_val: int16x4_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - let x: int16x8_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_s8(b), a); - }; - x -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let x: int16x8_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_s8(b), a); - }; - let ret_val: int16x8_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { - let x: int32x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_s16(b), a); - }; - x -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let x: int32x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_s16(b), a); - }; - let ret_val: int32x2_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - let x: int32x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_s16(b), a); - }; - x -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x: int32x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_s16(b), a); - }; - let ret_val: int32x4_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { - let x: int64x1_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_s32(b), a); - }; - x -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let x: int64x1_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_s32(b), a); - }; - x -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - let x: int64x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_s32(b), a); - }; - x -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let x: int64x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_s32(b), a); - }; - let ret_val: int64x2_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { - let x: uint16x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_u8(b), a); - }; - x -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x: uint16x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_u8(b), a); - }; - let ret_val: uint16x4_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - let x: uint16x8_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_u8(b), a); - }; - x -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let x: uint16x8_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_u8(b), a); - }; - let ret_val: uint16x8_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { - let x: uint32x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_u16(b), a); - }; - x -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let x: uint32x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_u16(b), a); - }; - let ret_val: uint32x2_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - let x: uint32x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_u16(b), a); - }; - x -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let x: uint32x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_u16(b), a); - }; - let ret_val: uint32x4_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { - let x: uint64x1_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_u32(b), a); - }; - x -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let x: uint64x1_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddl_u32(b), a); - }; - x -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - let x: uint64x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_u32(b), a); - }; - x -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let x: uint64x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = simd_add(vpaddlq_u32(b), a); - }; - let ret_val: uint64x2_t = x; - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(faddp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f32" - )] - fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpadd_f32(a, b) -} -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(faddp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f32" - )] - fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vpadd_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] - fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vpadd_s8(a, b) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] - fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vpadd_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] - fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vpadd_s16(a, b) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] - fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vpadd_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] - fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vpadd_s32(a, b) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] - fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vpadd_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - transmute(vpadd_s8(transmute(a), transmute(b))) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vpadd_s8(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - transmute(vpadd_s16(transmute(a), transmute(b))) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(vpadd_s16(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - transmute(vpadd_s32(transmute(a), transmute(b))) -} -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); - let ret_val: uint32x2_t = transmute(vpadd_s32(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] - fn _vpaddl_s8(a: int8x8_t) -> int16x4_t; - } - _vpaddl_s8(a) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] - fn _vpaddl_s8(a: int8x8_t) -> int16x4_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = _vpaddl_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")] - fn _vpaddlq_s8(a: int8x16_t) -> int16x8_t; - } - _vpaddlq_s8(a) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")] - fn _vpaddlq_s8(a: int8x16_t) -> int16x8_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int16x8_t = _vpaddlq_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] - fn _vpaddl_s16(a: int16x4_t) -> int32x2_t; - } - _vpaddl_s16(a) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] - fn _vpaddl_s16(a: int16x4_t) -> int32x2_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x2_t = _vpaddl_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] - fn _vpaddlq_s16(a: int16x8_t) -> int32x4_t; - } - _vpaddlq_s16(a) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] - fn _vpaddlq_s16(a: int16x8_t) -> int32x4_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x4_t = _vpaddlq_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] - fn _vpaddl_s32(a: int32x2_t) -> int64x1_t; - } - _vpaddl_s32(a) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] - fn _vpaddl_s32(a: int32x2_t) -> int64x1_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - _vpaddl_s32(a) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] - fn _vpaddlq_s32(a: int32x4_t) -> int64x2_t; - } - _vpaddlq_s32(a) -} -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] - fn _vpaddlq_s32(a: int32x4_t) -> int64x2_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int64x2_t = _vpaddlq_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] - fn _vpaddl_u8(a: int8x8_t) -> int16x4_t; - } - _vpaddl_u8(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] - fn _vpaddl_u8(a: int8x8_t) -> int16x4_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x4_t = _vpaddl_u8(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] - fn _vpaddlq_u8(a: int8x16_t) -> int16x8_t; - } - _vpaddlq_u8(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] - fn _vpaddlq_u8(a: int8x16_t) -> int16x8_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint16x8_t = _vpaddlq_u8(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] - fn _vpaddl_u16(a: int16x4_t) -> int32x2_t; - } - _vpaddl_u16(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] - fn _vpaddl_u16(a: int16x4_t) -> int32x2_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x2_t = _vpaddl_u16(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] - fn _vpaddlq_u16(a: int16x8_t) -> int32x4_t; - } - _vpaddlq_u16(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] - fn _vpaddlq_u16(a: int16x8_t) -> int32x4_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint32x4_t = _vpaddlq_u16(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] - fn _vpaddl_u32(a: int32x2_t) -> int64x1_t; - } - _vpaddl_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] - fn _vpaddl_u32(a: int32x2_t) -> int64x1_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - _vpaddl_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] - fn _vpaddlq_u32(a: int32x4_t) -> int64x2_t; - } - _vpaddlq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] - fn _vpaddlq_u32(a: int32x4_t) -> int64x2_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint64x2_t = _vpaddlq_u32(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v2f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] - fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpmax_f32(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v2f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] - fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vpmax_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] - fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vpmax_s8(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] - fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vpmax_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] - fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vpmax_s16(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] - fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vpmax_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] - fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vpmax_s32(a, b) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] - fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vpmax_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] - fn _vpmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] - fn _vpmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] - fn _vpmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] - fn _vpmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] - fn _vpmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] - fn _vpmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminp.v2f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] - fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vpmin_f32(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminp.v2f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] - fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vpmin_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")] - fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vpmin_s8(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")] - fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vpmin_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")] - fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vpmin_s16(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")] - fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vpmin_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")] - fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vpmin_s32(a, b) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")] - fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vpmin_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] - fn _vpmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] - fn _vpmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] - fn _vpmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] - fn _vpmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] - fn _vpmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] - fn _vpmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] - fn _vqabs_s8(a: int8x8_t) -> int8x8_t; - } - _vqabs_s8(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] - fn _vqabs_s8(a: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqabs_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] - fn _vqabsq_s8(a: int8x16_t) -> int8x16_t; - } - _vqabsq_s8(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] - fn _vqabsq_s8(a: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqabsq_s8(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] - fn _vqabs_s16(a: int16x4_t) -> int16x4_t; - } - _vqabs_s16(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] - fn _vqabs_s16(a: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqabs_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] - fn _vqabsq_s16(a: int16x8_t) -> int16x8_t; - } - _vqabsq_s16(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] - fn _vqabsq_s16(a: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqabsq_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] - fn _vqabs_s32(a: int32x2_t) -> int32x2_t; - } - _vqabs_s32(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] - fn _vqabs_s32(a: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vqabs_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] - fn _vqabsq_s32(a: int32x4_t) -> int32x4_t; - } - _vqabsq_s32(a) -} -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] - fn _vqabsq_s32(a: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqabsq_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] - fn _vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqadd_s8(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] - fn _vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqadd_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] - fn _vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqaddq_s8(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] - fn _vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqaddq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] - fn _vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqadd_s16(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] - fn _vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqadd_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] - fn _vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqaddq_s16(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] - fn _vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqaddq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] - fn _vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqadd_s32(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] - fn _vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vqadd_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] - fn _vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqaddq_s32(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] - fn _vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqaddq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v1i64")] - fn _vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqadd_s64(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] - fn _vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqaddq_s64(a, b) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] - fn _vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vqaddq_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] - fn _vqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] - fn _vqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] - fn _vqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] - fn _vqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] - fn _vqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] - fn _vqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] - fn _vqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] - fn _vqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] - fn _vqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] - fn _vqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] - fn _vqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] - fn _vqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")] - fn _vqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqadd_u64(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] - fn _vqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] - fn _vqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal, N = 2) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s32(a, vqdmull_lane_s16::(b, c)) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal, N = 2) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_lane_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal, N = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqaddq_s64(a, vqdmull_lane_s32::(b, c)) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal, N = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_lane_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vqaddq_s32(a, vqdmull_n_s16(b, c)) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_n_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vqaddq_s64(a, vqdmull_n_s32(b, c)) -} -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_n_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - vqaddq_s32(a, vqdmull_s16(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqaddq_s32(a, vqdmull_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - vqaddq_s64(a, vqdmull_s32(b, c)) -} -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vqaddq_s64(a, vqdmull_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl, N = 2) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s32(a, vqdmull_lane_s16::(b, c)) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl, N = 2) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_lane_s16( - a: int32x4_t, - b: int16x4_t, - c: int16x4_t, -) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_lane_s16::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl, N = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqsubq_s64(a, vqdmull_lane_s32::(b, c)) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl, N = 1) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_lane_s32( - a: int64x2_t, - b: int32x2_t, - c: int32x2_t, -) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_lane_s32::(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vqsubq_s32(a, vqdmull_n_s16(b, c)) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_n_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vqsubq_s64(a, vqdmull_n_s32(b, c)) -} -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_n_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - vqsubq_s32(a, vqdmull_s16(b, c)) -} -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqsubq_s32(a, vqdmull_s16(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - vqsubq_s64(a, vqdmull_s32(b, c)) -} -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = vqsubq_s64(a, vqdmull_s32(b, c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x4_t = vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x2_t = vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) -} -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - let b: int16x4_t = vdup_n_s16(b); - vqdmulh_s16(a, b) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = vdup_n_s16(b); - let ret_val: int16x4_t = vqdmulh_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - let b: int16x8_t = vdupq_n_s16(b); - vqdmulhq_s16(a, b) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = vdupq_n_s16(b); - let ret_val: int16x8_t = vqdmulhq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - let b: int32x2_t = vdup_n_s32(b); - vqdmulh_s32(a, b) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = vdup_n_s32(b); - let ret_val: int32x2_t = vqdmulh_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - let b: int32x4_t = vdupq_n_s32(b); - vqdmulhq_s32(a, b) -} -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = vdupq_n_s32(b); - let ret_val: int32x4_t = vqdmulhq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i16" - )] - fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqdmulh_s16(a, b) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i16" - )] - fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqdmulh_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v8i16" - )] - fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqdmulhq_s16(a, b) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v8i16" - )] - fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqdmulhq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v2i32" - )] - fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqdmulh_s32(a, b) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v2i32" - )] - fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vqdmulh_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i32" - )] - fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqdmulhq_s32(a, b) -} -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i32" - )] - fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqdmulhq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - vqdmull_s16(a, b) -} -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); - let ret_val: int32x4_t = vqdmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - vqdmull_s32(a, b) -} -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); - let ret_val: int64x2_t = vqdmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { - vqdmull_s16(a, vdup_n_s16(b)) -} -#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqdmull_s16(a, vdup_n_s16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { - vqdmull_s32(a, vdup_n_s32(b)) -} -#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = vqdmull_s32(a, vdup_n_s32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v4i32" - )] - fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - _vqdmull_s16(a, b) -} -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v4i32" - )] - fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqdmull_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v2i64" - )] - fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - _vqdmull_s32(a, b) -} -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v2i64" - )] - fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vqdmull_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v8i8" - )] - fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; - } - _vqmovn_s16(a) -} -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v8i8" - )] - fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqmovn_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v4i16" - )] - fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; - } - _vqmovn_s32(a) -} -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v4i16" - )] - fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqmovn_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v2i32" - )] - fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; - } - _vqmovn_s64(a) -} -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v2i32" - )] - fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vqmovn_s64(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v8i8" - )] - fn _vqmovn_u16(a: int16x8_t) -> int8x8_t; - } - _vqmovn_u16(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v8i8" - )] - fn _vqmovn_u16(a: int16x8_t) -> int8x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqmovn_u16(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v4i16" - )] - fn _vqmovn_u32(a: int32x4_t) -> int16x4_t; - } - _vqmovn_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v4i16" - )] - fn _vqmovn_u32(a: int32x4_t) -> int16x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqmovn_u32(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v2i32" - )] - fn _vqmovn_u64(a: int64x2_t) -> int32x2_t; - } - _vqmovn_u64(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v2i32" - )] - fn _vqmovn_u64(a: int64x2_t) -> int32x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqmovn_u64(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v8i8" - )] - fn _vqmovun_s16(a: int16x8_t) -> int8x8_t; - } - _vqmovun_s16(a).as_unsigned() -} -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v8i8" - )] - fn _vqmovun_s16(a: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqmovun_s16(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v4i16" - )] - fn _vqmovun_s32(a: int32x4_t) -> int16x4_t; - } - _vqmovun_s32(a).as_unsigned() -} -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v4i16" - )] - fn _vqmovun_s32(a: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqmovun_s32(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v2i32" - )] - fn _vqmovun_s64(a: int64x2_t) -> int32x2_t; - } - _vqmovun_s64(a).as_unsigned() -} -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v2i32" - )] - fn _vqmovun_s64(a: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqmovun_s64(a).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] - fn _vqneg_s8(a: int8x8_t) -> int8x8_t; - } - _vqneg_s8(a) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] - fn _vqneg_s8(a: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqneg_s8(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] - fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; - } - _vqnegq_s8(a) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] - fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqnegq_s8(a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] - fn _vqneg_s16(a: int16x4_t) -> int16x4_t; - } - _vqneg_s16(a) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] - fn _vqneg_s16(a: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqneg_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] - fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; - } - _vqnegq_s16(a) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] - fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqnegq_s16(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] - fn _vqneg_s32(a: int32x2_t) -> int32x2_t; - } - _vqneg_s32(a) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] - fn _vqneg_s32(a: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vqneg_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] - fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; - } - _vqnegq_s32(a) -} -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] - fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqnegq_s32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulh_s16(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int16x4_t = vqrdmulh_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); - vqrdmulh_s32(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vqrdmulh_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulh_s16(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int16x4_t = vqrdmulh_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); - vqrdmulh_s32(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vqrdmulh_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let b: int16x8_t = simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmulhq_s16(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: int16x8_t = simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - let ret_val: int16x8_t = vqrdmulhq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulhq_s32(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vqrdmulhq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: int16x8_t = simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - vqrdmulhq_s16(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!( - b, - b, - [ - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32, - LANE as u32 - ] - ); - let ret_val: int16x8_t = vqrdmulhq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vqrdmulhq_s32(a, b) -} -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vqrdmulhq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - vqrdmulh_s16(a, vdup_n_s16(b)) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = vqrdmulh_s16(a, vdup_n_s16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - vqrdmulhq_s16(a, vdupq_n_s16(b)) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vqrdmulhq_s16(a, vdupq_n_s16(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - vqrdmulh_s32(a, vdup_n_s32(b)) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = vqrdmulh_s32(a, vdup_n_s32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - vqrdmulhq_s32(a, vdupq_n_s32(b)) -} -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqrdmulhq_s32(a, vdupq_n_s32(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i16" - )] - fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqrdmulh_s16(a, b) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i16" - )] - fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqrdmulh_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v8i16" - )] - fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqrdmulhq_s16(a, b) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v8i16" - )] - fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqrdmulhq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v2i32" - )] - fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqrdmulh_s32(a, b) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v2i32" - )] - fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vqrdmulh_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i32" - )] - fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqrdmulhq_s32(a, b) -} -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i32" - )] - fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqrdmulhq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i8" - )] - fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqrshl_s8(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i8" - )] - fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqrshl_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v16i8" - )] - fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqrshlq_s8(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v16i8" - )] - fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqrshlq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i16" - )] - fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqrshl_s16(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i16" - )] - fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqrshl_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i16" - )] - fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqrshlq_s16(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i16" - )] - fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqrshlq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i32" - )] - fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqrshl_s32(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i32" - )] - fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vqrshl_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i32" - )] - fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqrshlq_s32(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i32" - )] - fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqrshlq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v1i64" - )] - fn _vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqrshl_s64(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i64" - )] - fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqrshlq_s64(a, b) -} -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i64" - )] - fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vqrshlq_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i8" - )] - fn _vqrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqrshl_u8(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i8" - )] - fn _vqrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqrshl_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v16i8" - )] - fn _vqrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqrshlq_u8(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v16i8" - )] - fn _vqrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vqrshlq_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i16" - )] - fn _vqrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqrshl_u16(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i16" - )] - fn _vqrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqrshl_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i16" - )] - fn _vqrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqrshlq_u16(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i16" - )] - fn _vqrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vqrshlq_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i32" - )] - fn _vqrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqrshl_u32(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i32" - )] - fn _vqrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vqrshl_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i32" - )] - fn _vqrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqrshlq_u32(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i32" - )] - fn _vqrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vqrshlq_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v1i64" - )] - fn _vqrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqrshl_u64(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i64" - )] - fn _vqrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqrshlq_u64(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i64" - )] - fn _vqrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vqrshlq_u64(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] - fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] - fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] - fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] - fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] - fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] - fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v8i8" - )] - fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqrshrn_n_s16(a, N) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v8i8" - )] - fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqrshrn_n_s16(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v4i16" - )] - fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqrshrn_n_s32(a, N) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v4i16" - )] - fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqrshrn_n_s32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v2i32" - )] - fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqrshrn_n_s64(a, N) -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v2i32" - )] - fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vqrshrn_n_s64(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] - fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqrshrn_n_u16( - a.as_signed(), - const { - uint16x8_t([ - -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, - -N as u16, - ]) - } - .as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] - fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqrshrn_n_u16( - a.as_signed(), - const { - uint16x8_t([ - -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, - -N as u16, - ]) - } - .as_signed(), - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] - fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqrshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] - fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqrshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] - fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqrshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] - fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqrshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v8i8" - )] - fn _vqrshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqrshrn_n_u16(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v8i8" - )] - fn _vqrshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqrshrn_n_u16(a.as_signed(), N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v4i16" - )] - fn _vqrshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqrshrn_n_u32(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v4i16" - )] - fn _vqrshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqrshrn_n_u32(a.as_signed(), N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v2i32" - )] - fn _vqrshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqrshrn_n_u64(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v2i32" - )] - fn _vqrshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqrshrn_n_u64(a.as_signed(), N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] - fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqrshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] - fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqrshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] - fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqrshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] - fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqrshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] - fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] - fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = - _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v8i8" - )] - fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqrshrun_n_s16(a, N).as_unsigned() -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v8i8" - )] - fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqrshrun_n_s16(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v4i16" - )] - fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqrshrun_n_s32(a, N).as_unsigned() -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v4i16" - )] - fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqrshrun_n_s32(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v2i32" - )] - fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqrshrun_n_s64(a, N).as_unsigned() -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v2i32" - )] - fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqrshrun_n_s64(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - vqshl_s8(a, vdup_n_s8(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vqshl_s8(a, vdup_n_s8(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - vqshlq_s8(a, vdupq_n_s8(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vqshlq_s8(a, vdupq_n_s8(N as _)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - vqshl_s16(a, vdup_n_s16(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = vqshl_s16(a, vdup_n_s16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - vqshlq_s16(a, vdupq_n_s16(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vqshlq_s16(a, vdupq_n_s16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 5); - vqshl_s32(a, vdup_n_s32(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 5); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = vqshl_s32(a, vdup_n_s32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 5); - vqshlq_s32(a, vdupq_n_s32(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 5); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = vqshlq_s32(a, vdupq_n_s32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { - static_assert_uimm_bits!(N, 6); - vqshl_s64(a, vdup_n_s64(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 6); - vqshlq_s64(a, vdupq_n_s64(N as _)) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 6); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = vqshlq_s64(a, vdupq_n_s64(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - vqshl_u8(a, vdup_n_s8(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = vqshl_u8(a, vdup_n_s8(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - vqshlq_u8(a, vdupq_n_s8(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = vqshlq_u8(a, vdupq_n_s8(N as _)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - vqshl_u16(a, vdup_n_s16(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = vqshl_u16(a, vdup_n_s16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - vqshlq_u16(a, vdupq_n_s16(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = vqshlq_u16(a, vdupq_n_s16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - vqshl_u32(a, vdup_n_s32(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = vqshl_u32(a, vdup_n_s32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - vqshlq_u32(a, vdupq_n_s32(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vqshlq_u32(a, vdupq_n_s32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - vqshl_u64(a, vdup_n_s64(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - vqshlq_u64(a, vdupq_n_s64(N as _)) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = vqshlq_u64(a, vdupq_n_s64(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v8i8" - )] - fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqshl_s8(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v8i8" - )] - fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqshl_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v16i8" - )] - fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqshlq_s8(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v16i8" - )] - fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqshlq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v4i16" - )] - fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqshl_s16(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v4i16" - )] - fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqshl_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v8i16" - )] - fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqshlq_s16(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v8i16" - )] - fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqshlq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v2i32" - )] - fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqshl_s32(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v2i32" - )] - fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vqshl_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v4i32" - )] - fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqshlq_s32(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v4i32" - )] - fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqshlq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v1i64" - )] - fn _vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqshl_s64(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v2i64" - )] - fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqshlq_s64(a, b) -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshl.v2i64" - )] - fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vqshlq_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v8i8" - )] - fn _vqshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqshl_u8(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v8i8" - )] - fn _vqshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqshl_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v16i8" - )] - fn _vqshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqshlq_u8(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v16i8" - )] - fn _vqshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vqshlq_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v4i16" - )] - fn _vqshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqshl_u16(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v4i16" - )] - fn _vqshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqshl_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v8i16" - )] - fn _vqshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqshlq_u16(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v8i16" - )] - fn _vqshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vqshlq_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v2i32" - )] - fn _vqshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqshl_u32(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v2i32" - )] - fn _vqshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vqshl_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v4i32" - )] - fn _vqshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqshlq_u32(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v4i32" - )] - fn _vqshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vqshlq_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v1i64" - )] - fn _vqshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqshl_u64(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v2i64" - )] - fn _vqshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqshlq_u64(a.as_signed(), b).as_unsigned() -} -#[doc = "Unsigned saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshl.v2i64" - )] - fn _vqshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vqshlq_u64(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; - } - _vqshlu_n_s8( - a, - const { - int8x8_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqshlu_n_s8( - a, - const { - int8x8_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; - } - _vqshluq_n_s8( - a, - const { - int8x16_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vqshluq_n_s8( - a, - const { - int8x16_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; - } - _vqshlu_n_s16( - a, - const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqshlu_n_s16( - a, - const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; - } - _vqshluq_n_s16( - a, - const { - int16x8_t([ - N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vqshluq_n_s16( - a, - const { - int16x8_t([ - N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; - } - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; - } - _vqshluq_n_s32( - a, - const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vqshluq_n_s32( - a, - const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; - } - _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; - } - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v8i8" - )] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; - } - _vqshlu_n_s8( - a, - const { - int8x8_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v8i8" - )] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqshlu_n_s8( - a, - const { - int8x8_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v16i8" - )] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; - } - _vqshluq_n_s8( - a, - const { - int8x16_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v16i8" - )] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vqshluq_n_s8( - a, - const { - int8x16_t([ - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v4i16" - )] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; - } - _vqshlu_n_s16( - a, - const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v4i16" - )] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqshlu_n_s16( - a, - const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v8i16" - )] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; - } - _vqshluq_n_s16( - a, - const { - int16x8_t([ - N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v8i16" - )] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vqshluq_n_s16( - a, - const { - int16x8_t([ - N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i32" - )] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; - } - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i32" - )] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v4i32" - )] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; - } - _vqshluq_n_s32( - a, - const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v4i32" - )] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vqshluq_n_s32( - a, - const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v1i64" - )] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; - } - _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i64" - )] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; - } - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i64" - )] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] - fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] - fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] - fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] - fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] - fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] - fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v8i8" - )] - fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqshrn_n_s16(a, N) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v8i8" - )] - fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqshrn_n_s16(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v4i16" - )] - fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqshrn_n_s32(a, N) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v4i16" - )] - fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqshrn_n_s32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v2i32" - )] - fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqshrn_n_s64(a, N) -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v2i32" - )] - fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vqshrn_n_s64(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] - fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqshrn_n_u16( - a.as_signed(), - const { - uint16x8_t([ - -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, - -N as u16, - ]) - } - .as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] - fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqshrn_n_u16( - a.as_signed(), - const { - uint16x8_t([ - -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, - -N as u16, - ]) - } - .as_signed(), - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] - fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] - fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] - fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] - fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v8i8" - )] - fn _vqshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqshrn_n_u16(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v8i8" - )] - fn _vqshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqshrn_n_u16(a.as_signed(), N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v4i16" - )] - fn _vqshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqshrn_n_u32(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v4i16" - )] - fn _vqshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqshrn_n_u32(a.as_signed(), N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v2i32" - )] - fn _vqshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqshrn_n_u64(a.as_signed(), N).as_unsigned() -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v2i32" - )] - fn _vqshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqshrn_n_u64(a.as_signed(), N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] - fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vqshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] - fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] - fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vqshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - .as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] - fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - .as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] - fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] - fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = - _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v8i8" - )] - fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vqshrun_n_s16(a, N).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v8i8" - )] - fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqshrun_n_s16(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v4i16" - )] - fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vqshrun_n_s32(a, N).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v4i16" - )] - fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqshrun_n_s32(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v2i32" - )] - fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vqshrun_n_s64(a, N).as_unsigned() -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v2i32" - )] - fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vqshrun_n_s64(a, N).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] - fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqsub_s8(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] - fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vqsub_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] - fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqsubq_s8(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] - fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vqsubq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] - fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqsub_s16(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] - fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vqsub_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] - fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqsubq_s16(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] - fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vqsubq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] - fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqsub_s32(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] - fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vqsub_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] - fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqsubq_s32(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] - fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vqsubq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")] - fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqsub_s64(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] - fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqsubq_s64(a, b) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] - fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vqsubq_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] - fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] - fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] - fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] - fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] - fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] - fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] - fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] - fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] - fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] - fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] - fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] - fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] - fn _vqsub_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vqsub_u64(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] - fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned() -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] - fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let x = vraddhn_s16(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let x = vraddhn_s16(b, c); - let ret_val: int8x16_t = - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let x = vraddhn_s32(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let x = vraddhn_s32(b, c); - let ret_val: int16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let x = vraddhn_s64(b, c); - simd_shuffle!(a, x, [0, 1, 2, 3]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); - let x = vraddhn_s64(b, c); - let ret_val: int32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); - let ret_val: uint8x16_t = - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); - simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); - let ret_val: uint16x8_t = simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); - simd_shuffle!(a, x, [0, 1, 2, 3]) -} -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); - let ret_val: uint32x4_t = simd_shuffle!(a, x, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] - fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - } - _vraddhn_s16(a, b) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] - fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vraddhn_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] - fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - } - _vraddhn_s32(a, b) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] - fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vraddhn_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] - fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - } - _vraddhn_s64(a, b) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] - fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vraddhn_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - transmute(vraddhn_s16(transmute(a), transmute(b))) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vraddhn_s16(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - transmute(vraddhn_s32(transmute(a), transmute(b))) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(vraddhn_s32(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - transmute(vraddhn_s64(transmute(a), transmute(b))) -} -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); - let ret_val: uint32x2_t = transmute(vraddhn_s64(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f32" - )] - fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; - } - _vrecpe_f32(a) -} -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f32" - )] - fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrecpe_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v4f32" - )] - fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; - } - _vrecpeq_f32(a) -} -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v4f32" - )] - fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrecpeq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v2i32" - )] - fn _vrecpe_u32(a: int32x2_t) -> int32x2_t; - } - _vrecpe_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v2i32" - )] - fn _vrecpe_u32(a: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vrecpe_u32(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v4i32" - )] - fn _vrecpeq_u32(a: int32x4_t) -> int32x4_t; - } - _vrecpeq_u32(a.as_signed()).as_unsigned() -} -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v4i32" - )] - fn _vrecpeq_u32(a: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vrecpeq_u32(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v2f32" - )] - fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vrecps_f32(a, b) -} -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v2f32" - )] - fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vrecps_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v4f32" - )] - fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vrecpsq_f32(a, b) -} -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v4f32" - )] - fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrecpsq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72246,24 +23798,44 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmlsq_lane_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72272,22 +23844,44 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vmlsq_laneq_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, +) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlsq_s16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72296,28 +23890,44 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] +pub unsafe fn vmlsq_laneq_u16( + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, +) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlsq_u16( + a, + b, + simd_shuffle!( + c, + c, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72326,22 +23936,27 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vmls_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72350,24 +23965,27 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmls_lane_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72376,22 +23994,27 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - transmute(a) +pub unsafe fn vmls_laneq_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72400,24 +24023,27 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmls_laneq_u32( + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72426,22 +24052,31 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - transmute(a) +pub unsafe fn vmlsq_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlsq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72450,24 +24085,31 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsq_lane_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlsq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72476,22 +24118,31 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - transmute(a) +pub unsafe fn vmlsq_laneq_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_s32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -72500,23 +24151,29 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmlsq_laneq_u32( + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsq_u32( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -72526,21 +24183,20 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - transmute(a) +pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmls_f32(a, b, vdup_n_f32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -72550,23 +24206,20 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlsq_f32(a, b, vdupq_n_f32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72576,21 +24229,20 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - transmute(a) +pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmls_s16(a, b, vdup_n_s16(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72600,22 +24252,20 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + vmlsq_s16(a, b, vdupq_n_s16(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72625,21 +24275,20 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + vmls_u16(a, b, vdup_n_u16(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72649,23 +24298,20 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + vmlsq_u16(a, b, vdupq_n_u16(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72675,21 +24321,20 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + vmls_s32(a, b, vdup_n_s32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72699,23 +24344,20 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + vmlsq_s32(a, b, vdupq_n_s32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72725,21 +24367,20 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + vmls_u32(a, b, vdup_n_u32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72749,22 +24390,20 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + vmlsq_u32(a, b, vdupq_n_u32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72774,21 +24413,20 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + simd_sub(a, simd_mul(b, c)) +} +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72798,23 +24436,20 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72824,21 +24459,20 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72848,23 +24482,20 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72874,21 +24505,20 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - transmute(a) +pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72898,23 +24528,20 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72924,21 +24551,20 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - transmute(a) +pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72948,27 +24574,20 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -72978,21 +24597,20 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - transmute(a) +pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -73002,23 +24620,20 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -73028,21 +24643,20 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - transmute(a) +pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "Multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mls) )] #[cfg_attr( not(target_arch = "arm"), @@ -73052,24 +24666,22 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + simd_sub(a, simd_mul(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73078,22 +24690,31 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - transmute(a) +pub unsafe fn vmlsl_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73102,24 +24723,31 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmlsl_laneq_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_s16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73128,22 +24756,27 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vmlsl_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73152,24 +24785,27 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsl_laneq_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, +) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73178,22 +24814,31 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vmlsl_lane_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73202,24 +24847,31 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmlsl_laneq_u16( + a: uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, +) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlsl_u16( + a, + b, + simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73228,22 +24880,27 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vmlsl_lane_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73252,23 +24909,25 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmlsl_laneq_u32( + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, +) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73278,21 +24937,20 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vmlsl_s16(a, b, vdup_n_s16(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73302,27 +24960,20 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vmlsl_s32(a, b, vdup_n_s32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73332,21 +24983,20 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + vmlsl_u16(a, b, vdup_n_u16(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73356,23 +25006,20 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + vmlsl_u32(a, b, vdup_n_u32(c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73382,21 +25029,20 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - transmute(a) +pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + simd_sub(a, vmull_s8(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73406,23 +25052,20 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + simd_sub(a, vmull_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "Signed multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73432,21 +25075,20 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - transmute(a) +pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + simd_sub(a, vmull_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73456,23 +25098,20 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + simd_sub(a, vmull_u8(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73482,21 +25121,20 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - transmute(a) +pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + simd_sub(a, vmull_u16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -73506,73 +25144,82 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + simd_sub(a, vmull_u32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smmla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - transmute(a) +pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] + fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vmmlaq_s32(a, b, c) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] #[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ummla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] + fn _vmmlaq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -73582,21 +25229,20 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - transmute(a) +pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -73606,23 +25252,22 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73631,22 +25276,23 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73655,24 +25301,23 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73681,22 +25326,26 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73705,24 +25354,26 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73731,22 +25382,26 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73755,23 +25410,39 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73780,22 +25451,23 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73804,24 +25476,26 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73830,22 +25504,26 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73854,24 +25532,39 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73880,22 +25573,23 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { - transmute(a) +pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73904,24 +25598,26 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73930,22 +25626,26 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { - transmute(a) +pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73954,28 +25654,39 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] +pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -73984,22 +25695,23 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { - transmute(a) +pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74008,24 +25720,26 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74034,22 +25748,26 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { - transmute(a) +pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74058,24 +25776,39 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_mul( + a, + simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74084,22 +25817,23 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { - transmute(a) +pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74108,23 +25842,24 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_mul( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74134,21 +25869,20 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { + simd_mul(a, vdup_n_f32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74158,27 +25892,20 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { + simd_mul(a, vdupq_n_f32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74188,21 +25915,20 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { - transmute(a) -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + simd_mul(a, vdup_n_s16(b)) +} +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74212,23 +25938,20 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + simd_mul(a, vdupq_n_s16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74238,21 +25961,20 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + simd_mul(a, vdup_n_s32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74262,23 +25984,20 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + simd_mul(a, vdupq_n_s32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74288,21 +26007,20 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { + simd_mul(a, vdup_n_u16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74312,27 +26030,20 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { + simd_mul(a, vdupq_n_u16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74342,21 +26053,20 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { + simd_mul(a, vdup_n_u32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74366,23 +26076,20 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { + simd_mul(a, vdupq_n_u32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(pmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74392,21 +26099,28 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { - transmute(a) +pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmul.v8i8" + )] + fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; + } + _vmul_p8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(pmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74416,23 +26130,28 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmul.v16i8" + )] + fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; + } + _vmulq_p8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74442,21 +26161,20 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { - transmute(a) +pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74466,23 +26184,20 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74492,21 +26207,20 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { - transmute(a) +pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74516,23 +26230,20 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74542,21 +26253,20 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { - transmute(a) +pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74566,23 +26276,20 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74592,21 +26299,20 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - transmute(a) +pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74616,22 +26322,20 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74641,21 +26345,20 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74665,23 +26368,20 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74691,21 +26391,20 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -74715,24 +26414,22 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_mul(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74741,22 +26438,26 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_s16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74765,23 +26466,26 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_s16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74790,22 +26494,23 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74814,24 +26519,23 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74840,22 +26544,26 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_u16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74864,24 +26572,26 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_u16( + a, + simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]), + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74890,22 +26600,23 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { - transmute(a) +pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -74914,23 +26625,21 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -74940,21 +26649,20 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { - transmute(a) +pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + vmull_s16(a, vdup_n_s16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -74964,27 +26672,20 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + vmull_s32(a, vdup_n_s32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -74994,21 +26695,20 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { - transmute(a) +pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { + vmull_u16(a, vdup_n_u16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75018,23 +26718,20 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { + vmull_u32(a, vdup_n_u32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(pmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75044,21 +26741,28 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { - transmute(a) +pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.pmull.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i16")] + fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; + } + _vmull_p8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75068,23 +26772,28 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")] + fn _vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + _vmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75094,21 +26803,28 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { - transmute(a) +pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")] + fn _vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + _vmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75118,23 +26834,28 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")] + fn _vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; + } + _vmull_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75144,21 +26865,28 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] + fn _vmull_u8(a: int8x8_t, b: int8x8_t) -> int16x8_t; + } + _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75168,27 +26896,28 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] + fn _vmull_u16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -75198,21 +26927,28 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umull.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] + fn _vmull_u32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75222,23 +26958,20 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75248,21 +26981,20 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75272,23 +27004,20 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75298,21 +27027,20 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75322,27 +27050,20 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75352,21 +27073,20 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75376,23 +27096,20 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -75402,21 +27119,20 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { - transmute(a) +pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { + simd_neg(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75426,22 +27142,20 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75451,21 +27165,20 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { - transmute(a) +pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75475,22 +27188,20 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75500,21 +27211,20 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { - transmute(a) +pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75524,22 +27234,20 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75549,21 +27257,20 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { - transmute(a) +pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75573,21 +27280,20 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75597,21 +27303,20 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { - transmute(a) +pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75621,21 +27326,20 @@ pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75645,22 +27349,20 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75670,21 +27372,20 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75694,22 +27395,20 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75719,21 +27418,20 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75743,22 +27441,20 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75768,21 +27464,20 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -75792,22 +27487,20 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_or(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75817,21 +27510,29 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { + let x: int16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s8(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75841,22 +27542,29 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let x: int16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s8(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75866,21 +27574,29 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { - transmute(a) +pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { + let x: int32x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s16(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75890,23 +27606,29 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + let x: int32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s16(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75916,21 +27638,29 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { - transmute(a) +pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { + let x: int64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_s32(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75940,27 +27670,29 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + let x: int64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_s32(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75970,21 +27702,29 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { - transmute(a) +pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { + let x: uint16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u8(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -75994,23 +27734,29 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + let x: uint16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u8(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u8(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76020,21 +27766,29 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { - transmute(a) +pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { + let x: uint32x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u16(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76044,23 +27798,29 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + let x: uint32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u16(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u16(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76070,21 +27830,29 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { - transmute(a) +pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { + let x: uint64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddl_u32(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76094,23 +27862,29 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + let x: uint64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u32(a, b); + } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = simd_add(vpaddlq_u32(b), a); + }; + x } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(faddp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76120,21 +27894,28 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f32" + )] + fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpadd_f32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76144,27 +27925,28 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] + fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpadd_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76174,21 +27956,28 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] + fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpadd_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76198,23 +27987,29 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] + fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpadd_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76224,21 +28019,21 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + transmute(vpadd_s8(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76248,23 +28043,24 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vpadd_s8(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76274,21 +28070,21 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + transmute(vpadd_s16(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76298,27 +28094,24 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vpadd_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76328,21 +28121,21 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + transmute(vpadd_s32(transmute(a), transmute(b))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76352,23 +28145,23 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = transmute(vpadd_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76378,21 +28171,28 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { - transmute(a) +pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] + fn _vpaddl_s8(a: int8x8_t) -> int16x4_t; + } + _vpaddl_s8(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76402,23 +28202,28 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")] + fn _vpaddlq_s8(a: int8x16_t) -> int16x8_t; + } + _vpaddlq_s8(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76428,21 +28233,28 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { - transmute(a) +pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] + fn _vpaddl_s16(a: int16x4_t) -> int32x2_t; + } + _vpaddl_s16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76452,23 +28264,28 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] + fn _vpaddlq_s16(a: int16x8_t) -> int32x4_t; + } + _vpaddlq_s16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76478,21 +28295,28 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { - transmute(a) +pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] + fn _vpaddl_s32(a: int32x2_t) -> int64x1_t; + } + _vpaddl_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76502,23 +28326,28 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] + fn _vpaddlq_s32(a: int32x4_t) -> int64x2_t; + } + _vpaddlq_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76528,21 +28357,28 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { - transmute(a) +pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] + fn _vpaddl_u8(a: int8x8_t) -> int16x4_t; + } + _vpaddl_u8(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76552,23 +28388,28 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] + fn _vpaddlq_u8(a: int8x16_t) -> int16x8_t; + } + _vpaddlq_u8(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76578,21 +28419,28 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { - transmute(a) +pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] + fn _vpaddl_u16(a: int16x4_t) -> int32x2_t; + } + _vpaddl_u16(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76602,22 +28450,28 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] + fn _vpaddlq_u16(a: int16x8_t) -> int32x4_t; + } + _vpaddlq_u16(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76627,21 +28481,28 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] + fn _vpaddl_u32(a: int32x2_t) -> int64x1_t; + } + _vpaddl_u32(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76651,23 +28512,28 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] + fn _vpaddlq_u32(a: int32x4_t) -> int64x2_t; + } + _vpaddlq_u32(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fmaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76677,21 +28543,28 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] + fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpmax_f32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76701,23 +28574,28 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] + fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmax_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76727,21 +28605,28 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] + fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmax_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76751,23 +28636,28 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.smaxp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] + fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmax_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76777,21 +28667,28 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] + fn _vpmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76801,22 +28698,28 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] + fn _vpmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(umaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76826,21 +28729,28 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { - transmute(a) +pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] + fn _vpmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(fminp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76850,23 +28760,28 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] + fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vpmin_f32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sminp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76876,21 +28791,28 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { - transmute(a) +pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")] + fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmin_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sminp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76900,23 +28822,28 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")] + fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmin_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sminp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76926,21 +28853,28 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { - transmute(a) +pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")] + fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmin_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uminp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76950,27 +28884,28 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] + fn _vpmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uminp) )] #[cfg_attr( not(target_arch = "arm"), @@ -76980,21 +28915,28 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { - transmute(a) +pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] + fn _vpmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uminp) )] #[cfg_attr( not(target_arch = "arm"), @@ -77004,23 +28946,28 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] + fn _vpmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -77029,22 +28976,29 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { - transmute(a) +)] +pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] + fn _vqabs_s8(a: int8x8_t) -> int8x8_t; + } + _vqabs_s8(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -77054,23 +29008,28 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] + fn _vqabsq_s8(a: int8x16_t) -> int8x16_t; + } + _vqabsq_s8(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -77080,21 +29039,28 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { - transmute(a) +pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] + fn _vqabs_s16(a: int16x4_t) -> int16x4_t; + } + _vqabs_s16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -77104,23 +29070,28 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] + fn _vqabsq_s16(a: int16x8_t) -> int16x8_t; + } + _vqabsq_s16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -77130,21 +29101,28 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] + fn _vqabs_s32(a: int32x2_t) -> int32x2_t; + } + _vqabs_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] +#[doc = "Signed saturating Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqabs) )] #[cfg_attr( not(target_arch = "arm"), @@ -77154,27 +29132,28 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqabs.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] + fn _vqabsq_s32(a: int32x4_t) -> int32x4_t; + } + _vqabsq_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77184,21 +29163,28 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] + fn _vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqadd_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77208,23 +29194,28 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] + fn _vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqaddq_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77234,21 +29225,28 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] + fn _vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqadd_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77258,23 +29256,28 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] + fn _vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqaddq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77284,21 +29287,28 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] + fn _vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqadd_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77308,23 +29318,28 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] + fn _vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqaddq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77334,21 +29349,28 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v1i64")] + fn _vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqadd_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77358,23 +29380,28 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqadd.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] + fn _vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqaddq_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77384,21 +29411,28 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { - transmute(a) +pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] + fn _vqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77408,23 +29442,28 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] + fn _vqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77434,21 +29473,28 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { - transmute(a) +pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] + fn _vqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77458,23 +29504,28 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] + fn _vqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77484,21 +29535,28 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { - transmute(a) +pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] + fn _vqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77508,23 +29566,28 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] + fn _vqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77534,21 +29597,28 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { - transmute(a) +pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")] + fn _vqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqadd_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -77558,24 +29628,30 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqadd.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] + fn _vqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlal, N = 2) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -77584,46 +29660,54 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - transmute(a) +pub unsafe fn vqdmlal_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqaddq_s32(a, vqdmull_lane_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlal, N = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vqdmlal_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqaddq_s64(a, vqdmull_lane_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -77633,21 +29717,20 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - transmute(a) +pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vqaddq_s32(a, vqdmull_n_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -77657,23 +29740,20 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vqaddq_s64(a, vqdmull_n_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -77683,21 +29763,20 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { - transmute(a) +pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + vqaddq_s32(a, vqdmull_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -77707,24 +29786,22 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + vqaddq_s64(a, vqdmull_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlsl, N = 2) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -77733,22 +29810,27 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - transmute(a) +pub unsafe fn vqdmlsl_lane_s16( + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, +) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s32(a, vqdmull_lane_s16::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlsl, N = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -77757,23 +29839,25 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmlsl_lane_s32( + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, +) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqsubq_s64(a, vqdmull_lane_s32::(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -77783,21 +29867,20 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - transmute(a) +pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vqsubq_s32(a, vqdmull_n_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -77807,22 +29890,20 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vqsubq_s64(a, vqdmull_n_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -77832,21 +29913,20 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - transmute(a) +pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + vqsubq_s32(a, vqdmull_s16(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -77856,24 +29936,22 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + vqsubq_s64(a, vqdmull_s32(b, c)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -77882,22 +29960,23 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - transmute(a) +pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -77906,24 +29985,23 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -77932,22 +30010,23 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - transmute(a) +pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -77956,27 +30035,21 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -77986,21 +30059,21 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { - transmute(a) +pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + let b: int16x4_t = vdup_n_s16(b); + vqdmulh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78010,23 +30083,21 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + let b: int16x8_t = vdupq_n_s16(b); + vqdmulhq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78036,21 +30107,21 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - transmute(a) +pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + let b: int32x2_t = vdup_n_s32(b); + vqdmulh_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78060,23 +30131,21 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + let b: int32x4_t = vdupq_n_s32(b); + vqdmulhq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78086,21 +30155,28 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - transmute(a) +pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i16" + )] + fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqdmulh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78110,23 +30186,28 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v8i16" + )] + fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqdmulhq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78136,21 +30217,28 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v2i32" + )] + fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqdmulh_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78160,28 +30248,30 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i32" + )] + fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqdmulhq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmull, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78190,22 +30280,24 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); + vqdmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmull, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78214,23 +30306,22 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); + vqdmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -78240,21 +30331,20 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + vqdmull_s16(a, vdup_n_s16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -78264,23 +30354,20 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + vqdmull_s32(a, vdup_n_s32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -78290,21 +30377,28 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - transmute(a) +pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v4i32" + )] + fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + _vqdmull_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -78314,23 +30408,28 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v2i64" + )] + fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + _vqdmull_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -78340,21 +30439,28 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v8i8" + )] + fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; + } + _vqmovn_s16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -78364,27 +30470,28 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v4i16" + )] + fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; + } + _vqmovn_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -78394,21 +30501,28 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { - transmute(a) +pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v2i32" + )] + fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; + } + _vqmovn_s64(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -78418,26 +30532,28 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v8i8" + )] + fn _vqmovn_u16(a: int16x8_t) -> int8x8_t; + } + _vqmovn_u16(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -78447,21 +30563,28 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { - transmute(a) +pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v4i16" + )] + fn _vqmovn_u32(a: int32x4_t) -> int16x4_t; + } + _vqmovn_u32(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -78471,22 +30594,28 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v2i32" + )] + fn _vqmovn_u64(a: int64x2_t) -> int32x2_t; + } + _vqmovn_u64(a.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqxtun) )] #[cfg_attr( not(target_arch = "arm"), @@ -78496,21 +30625,28 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { - transmute(a) +pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v8i8" + )] + fn _vqmovun_s16(a: int16x8_t) -> int8x8_t; + } + _vqmovun_s16(a).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqxtun) )] #[cfg_attr( not(target_arch = "arm"), @@ -78520,22 +30656,28 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v4i16" + )] + fn _vqmovun_s32(a: int32x4_t) -> int16x4_t; + } + _vqmovun_s32(a).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqxtun) )] #[cfg_attr( not(target_arch = "arm"), @@ -78545,21 +30687,28 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { - transmute(a) +pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v2i32" + )] + fn _vqmovun_s64(a: int64x2_t) -> int32x2_t; + } + _vqmovun_s64(a).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -78569,22 +30718,28 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] + fn _vqneg_s8(a: int8x8_t) -> int8x8_t; + } + _vqneg_s8(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -78594,21 +30749,28 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { - transmute(a) +pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] + fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; + } + _vqnegq_s8(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -78618,26 +30780,28 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] + fn _vqneg_s16(a: int16x4_t) -> int16x4_t; + } + _vqneg_s16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -78647,21 +30811,28 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { - transmute(a) +pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] + fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; + } + _vqnegq_s16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -78671,22 +30842,28 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] + fn _vqneg_s32(a: int32x2_t) -> int32x2_t; + } + _vqneg_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -78696,22 +30873,30 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { - transmute(a) +pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] + fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; + } + _vqnegq_s32(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78720,23 +30905,24 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78745,22 +30931,24 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { - transmute(a) +pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); + vqrdmulh_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78769,23 +30957,24 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78794,22 +30983,24 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { - transmute(a) +pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); + vqrdmulh_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78818,27 +31009,37 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + let b: int16x8_t = simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmulhq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78847,22 +31048,24 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { - transmute(a) +pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulhq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78871,23 +31074,37 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + let b: int16x8_t = simd_shuffle!( + b, + b, + [ + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32, + LANE as u32 + ] + ); + vqrdmulhq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -78896,21 +31113,22 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { - transmute(a) +pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vqrdmulhq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78920,22 +31138,20 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + vqrdmulh_s16(a, vdup_n_s16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78945,21 +31161,20 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + vqrdmulhq_s16(a, vdupq_n_s16(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78969,22 +31184,20 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + vqrdmulh_s32(a, vdup_n_s32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "Vector saturating rounding doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -78994,21 +31207,20 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { - transmute(a) +pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + vqrdmulhq_s32(a, vdupq_n_s32(b)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -79018,22 +31230,28 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v4i16" + )] + fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqrdmulh_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -79043,21 +31261,28 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v8i16" + )] + fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqrdmulhq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -79067,23 +31292,28 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { - let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v2i32" + )] + fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqrdmulh_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "Signed saturating rounding doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -79093,21 +31323,28 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrdmulh.v4i32" + )] + fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqrdmulhq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79117,22 +31354,28 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v8i8" + )] + fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqrshl_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79142,21 +31385,28 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { - transmute(a) +pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v16i8" + )] + fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqrshlq_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79166,22 +31416,28 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v4i16" + )] + fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqrshl_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79191,21 +31447,28 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v8i16" + )] + fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqrshlq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79215,23 +31478,28 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v2i32" + )] + fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqrshl_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79241,21 +31509,28 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v4i32" + )] + fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqrshlq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79265,22 +31540,28 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v1i64" + )] + fn _vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqrshl_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "Signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79290,21 +31571,28 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { - transmute(a) +pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshl.v2i64" + )] + fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqrshlq_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79314,22 +31602,28 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v8i8" + )] + fn _vqrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqrshl_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79339,21 +31633,28 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v16i8" + )] + fn _vqrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqrshlq_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79363,23 +31664,28 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i16" + )] + fn _vqrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqrshl_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79389,21 +31695,28 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { - transmute(a) +pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v8i16" + )] + fn _vqrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqrshlq_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79413,22 +31726,28 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i32" + )] + fn _vqrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqrshl_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79438,21 +31757,28 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i32" + )] + fn _vqrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqrshlq_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79462,22 +31788,28 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v1i64" + )] + fn _vqrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqrshl_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79487,22 +31819,423 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { - transmute(a) +pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i64" + )] + fn _vqrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqrshlq_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] + fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqrshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] + fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqrshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] + fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v8i8" + )] + fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqrshrn_n_s16(a, N) +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v4i16" + )] + fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqrshrn_n_s32(a, N) +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v2i32" + )] + fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqrshrn_n_s64(a, N) +} +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] + fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqrshrn_n_u16( + a.as_signed(), + const { + uint16x8_t([ + -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, + -N as u16, + ]) + } + .as_signed(), + ) + .as_unsigned() +} +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] + fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqrshrn_n_u32( + a.as_signed(), + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + ) + .as_unsigned() +} +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] + fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqrshrn_n_u64( + a.as_signed(), + const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), + ) + .as_unsigned() +} +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v8i8" + )] + fn _vqrshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqrshrn_n_u16(a.as_signed(), N).as_unsigned() +} +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v4i16" + )] + fn _vqrshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqrshrn_n_u32(a.as_signed(), N).as_unsigned() +} +#[doc = "Unsigned signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v2i32" + )] + fn _vqrshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqrshrn_n_u64(a.as_signed(), N).as_unsigned() +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] + fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqrshrun_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) + .as_unsigned() +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] + fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqrshrun_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) + .as_unsigned() +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] + fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v8i8" + )] + fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqrshrun_n_s16(a, N).as_unsigned() +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v4i16" + )] + fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqrshrun_n_s32(a, N).as_unsigned() +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v2i32" + )] + fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqrshrun_n_s64(a, N).as_unsigned() +} +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79511,23 +32244,23 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + vqshl_s8(a, vdup_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79536,22 +32269,23 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + vqshlq_s8(a, vdupq_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79560,24 +32294,23 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { - let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + vqshl_s16(a, vdup_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79586,22 +32319,23 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + vqshlq_s16(a, vdupq_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79610,23 +32344,23 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + vqshl_s32(a, vdup_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79635,22 +32369,23 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { - transmute(a) +pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + vqshlq_s32(a, vdupq_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79659,23 +32394,23 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { + static_assert_uimm_bits!(N, 6); + vqshl_s64(a, vdup_n_s64(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79684,22 +32419,23 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + vqshlq_s64(a, vdupq_n_s64(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79708,24 +32444,23 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + vqshl_u8(a, vdup_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79734,22 +32469,23 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + vqshlq_u8(a, vdupq_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79758,23 +32494,23 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + vqshl_u16(a, vdup_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79783,22 +32519,23 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { - transmute(a) +pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + vqshlq_u16(a, vdupq_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79807,23 +32544,23 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + vqshl_u32(a, vdup_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79832,22 +32569,23 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + vqshlq_u32(a, vdupq_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79856,24 +32594,23 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + vqshl_u64(a, vdup_n_s64(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -79882,21 +32619,21 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { - transmute(a) +pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + vqshlq_u64(a, vdupq_n_s64(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79906,22 +32643,28 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i8" + )] + fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqshl_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79931,21 +32674,28 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v16i8" + )] + fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqshlq_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79955,22 +32705,28 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i16" + )] + fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqshl_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -79980,21 +32736,28 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { - transmute(a) +pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i16" + )] + fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqshlq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80004,22 +32767,28 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i32" + )] + fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqshl_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80029,21 +32798,28 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i32" + )] + fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqshlq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80053,23 +32829,28 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { - let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v1i64" + )] + fn _vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqshl_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80079,21 +32860,28 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { - transmute(a) +pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i64" + )] + fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqshlq_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80103,22 +32891,28 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i8" + )] + fn _vqshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqshl_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80128,21 +32922,28 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - transmute(a) +pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v16i8" + )] + fn _vqshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqshlq_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80152,22 +32953,28 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i16" + )] + fn _vqshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqshl_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80177,21 +32984,28 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - transmute(a) +pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i16" + )] + fn _vqshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqshlq_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80201,23 +33015,28 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i32" + )] + fn _vqshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqshl_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80227,21 +33046,28 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - transmute(a) +pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i32" + )] + fn _vqshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqshlq_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80251,22 +33077,28 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v1i64" + )] + fn _vqshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqshl_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -80276,517 +33108,799 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - transmute(a) +pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i64" + )] + fn _vqshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqshlq_u64(a.as_signed(), b).as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + } + _vqshlu_n_s8( + a, + const { + int8x8_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + } + _vqshluq_n_s8( + a, + const { + int8x16_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + } + _vqshlu_n_s16( + a, + const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + } + _vqshluq_n_s16( + a, + const { + int16x8_t([ + N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, + ]) + }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + } + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + } + _vqshluq_n_s32( + a, + const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] + fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; + } + _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + } + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i8" + )] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + } + _vqshlu_n_s8( + a, + const { + int8x8_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v16i8" + )] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + } + _vqshluq_n_s8( + a, + const { + int8x16_t([ + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, + ]) + }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i16" + )] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + } + _vqshlu_n_s16( + a, + const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i16" + )] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + } + _vqshluq_n_s16( + a, + const { + int16x8_t([ + N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, + ]) + }, + ) + .as_unsigned() +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i32" + )] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + } + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i32" + )] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + } + _vqshluq_n_s32( + a, + const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, + ) + .as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - transmute(a) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v1i64" + )] + fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; + } + _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i64" + )] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + } + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] + fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] + fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] + fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v8i8" + )] + fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqshrn_n_s16(a, N) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - transmute(a) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v4i16" + )] + fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqshrn_n_s32(a, N) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v2i32" + )] + fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqshrn_n_s64(a, N) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] + fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqshrn_n_u16( + a.as_signed(), + const { + uint16x8_t([ + -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, + -N as u16, + ]) + } + .as_signed(), + ) + .as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] + fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqshrn_n_u32( + a.as_signed(), + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + ) + .as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] + fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqshrn_n_u64( + a.as_signed(), + const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), + ) + .as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v8i8" + )] + fn _vqshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqshrn_n_u16(a.as_signed(), N).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - transmute(a) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v4i16" + )] + fn _vqshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqshrn_n_u32(a.as_signed(), N).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v2i32" + )] + fn _vqshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqshrn_n_u64(a.as_signed(), N).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] + fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vqshrun_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) + .as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] + fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vqshrun_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, ) + .as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - transmute(a) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] + fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v8i8" + )] + fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vqshrun_n_s16(a, N).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - transmute(a) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v4i16" + )] + fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vqshrun_n_s32(a, N).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v2i32" + )] + fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vqshrun_n_s64(a, N).as_unsigned() +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80796,23 +33910,28 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] + fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqsub_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80822,21 +33941,28 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - transmute(a) +pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] + fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqsubq_s8(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80846,27 +33972,28 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] + fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqsub_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80876,21 +34003,28 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - transmute(a) +pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] + fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vqsubq_s16(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80900,23 +34034,28 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] + fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vqsub_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80926,21 +34065,28 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - transmute(a) +pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] + fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vqsubq_s32(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80950,23 +34096,28 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v1i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")] + fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vqsub_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -80976,21 +34127,28 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - transmute(a) +pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqsub.v2i64" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] + fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vqsubq_s64(a, b) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81000,27 +34158,28 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] + fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81030,21 +34189,28 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - transmute(a) +pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] + fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81054,23 +34220,28 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] + fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81080,29 +34251,28 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i8" + link_name = "llvm.aarch64.neon.uqsub.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] - fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] + fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vrhadd_s8(a, b) + _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81112,32 +34282,28 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i8" + link_name = "llvm.aarch64.neon.uqsub.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] - fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] + fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vrhadd_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81147,29 +34313,28 @@ pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v16i8" + link_name = "llvm.aarch64.neon.uqsub.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] - fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] + fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vrhaddq_s8(a, b) + _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81179,36 +34344,28 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v16i8" + link_name = "llvm.aarch64.neon.uqsub.v1i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] - fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] + fn _vqsub_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vrhaddq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) + _vqsub_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -81218,29 +34375,28 @@ pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i16" + link_name = "llvm.aarch64.neon.uqsub.v2i64" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] - fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] + fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - _vrhadd_s16(a, b) + _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -81250,32 +34406,21 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] - fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vrhadd_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let x = vraddhn_s16(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -81285,29 +34430,21 @@ pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] - fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vrhaddq_s16(a, b) +pub unsafe fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let x = vraddhn_s32(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -81317,32 +34454,21 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] - fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vrhaddq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let x = vraddhn_s64(b, c); + simd_shuffle!(a, x, [0, 1, 2, 3]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -81352,29 +34478,21 @@ pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] - fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vrhadd_s32(a, b) +pub unsafe fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -81384,32 +34502,21 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] - fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vrhadd_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -81419,29 +34526,21 @@ pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] - fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vrhaddq_s32(a, b) +pub unsafe fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); + simd_shuffle!(a, x, [0, 1, 2, 3]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81451,32 +34550,28 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { +pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i32" + link_name = "llvm.aarch64.neon.raddhn.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] - fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] + fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vrhaddq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vraddhn_s16(a, b) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81486,29 +34581,28 @@ pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { +pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i8" + link_name = "llvm.aarch64.neon.raddhn.v4i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] - fn _vrhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] + fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; } - _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vraddhn_s32(a, b) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81518,32 +34612,29 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { +pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i8" + link_name = "llvm.aarch64.neon.raddhn.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] - fn _vrhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] + fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vraddhn_s64(a, b) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81553,29 +34644,21 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] - fn _vrhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + transmute(vraddhn_s16(transmute(a), transmute(b))) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81585,36 +34668,24 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] - fn _vrhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vraddhn_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81624,29 +34695,21 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] - fn _vrhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + transmute(vraddhn_s32(transmute(a), transmute(b))) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81656,32 +34719,24 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] - fn _vrhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vraddhn_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81691,29 +34746,21 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] - fn _vrhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() +pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + transmute(vraddhn_s64(transmute(a), transmute(b))) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -81723,32 +34770,23 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] - fn _vrhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = transmute(vraddhn_s64(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(frecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -81758,29 +34796,28 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v2i32" + link_name = "llvm.aarch64.neon.frecpe.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] - fn _vrhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; } - _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vrecpe_f32(a) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(frecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -81790,32 +34827,28 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v2i32" + link_name = "llvm.aarch64.neon.frecpe.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] - fn _vrhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vrecpeq_f32(a) } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(urecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -81825,29 +34858,28 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i32" + link_name = "llvm.aarch64.neon.urecpe.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] - fn _vrhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrecpe_u32(a: int32x2_t) -> int32x2_t; } - _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vrecpe_u32(a.as_signed()).as_unsigned() } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(urecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -81857,32 +34889,28 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i32" + link_name = "llvm.aarch64.neon.urecpe.v4i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] - fn _vrhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrecpeq_u32(a: int32x4_t) -> int32x4_t; } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) + _vrecpeq_u32(a.as_signed()).as_unsigned() } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(frecps) )] #[cfg_attr( not(target_arch = "arm"), @@ -81892,29 +34920,28 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { +pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v2f32" + link_name = "llvm.aarch64.neon.frecps.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] - fn _vrndn_f32(a: float32x2_t) -> float32x2_t; + fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - _vrndn_f32(a) + _vrecps_f32(a, b) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(frecps) )] #[cfg_attr( not(target_arch = "arm"), @@ -81924,31 +34951,29 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { +pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v2f32" + link_name = "llvm.aarch64.neon.frecps.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] - fn _vrndn_f32(a: float32x2_t) -> float32x2_t; + fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrndn_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) + _vrecpsq_f32(a, b) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -81958,29 +34983,21 @@ pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v4f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] - fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; - } - _vrndnq_f32(a) +pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { + transmute(a) } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -81990,31 +35007,22 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frintn.v4f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] - fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrndnq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82024,29 +35032,21 @@ pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i8" - )] - fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vrshl_s8(a, b) +pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82056,32 +35056,23 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i8" - )] - fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vrshl_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82091,29 +35082,21 @@ pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v16i8" - )] - fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vrshlq_s8(a, b) +pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82123,36 +35106,23 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v16i8" - )] - fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vrshlq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82162,29 +35132,21 @@ pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i16" - )] - fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vrshl_s16(a, b) +pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82194,32 +35156,23 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i16" - )] - fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vrshl_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82229,29 +35182,21 @@ pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i16" - )] - fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vrshlq_s16(a, b) +pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82261,32 +35206,22 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i16" - )] - fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vrshlq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82296,29 +35231,21 @@ pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i32" - )] - fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vrshl_s32(a, b) +pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82328,32 +35255,23 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i32" - )] - fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vrshl_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82363,29 +35281,21 @@ pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i32" - )] - fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vrshlq_s32(a, b) +pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82395,31 +35305,23 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i32" - )] - fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vrshlq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82429,29 +35331,47 @@ pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v1i64" - )] - fn _vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vrshl_s64(a, b) +pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82461,29 +35381,21 @@ pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i64" - )] - fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vrshlq_s64(a, b) +pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { + transmute(a) } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82493,32 +35405,22 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i64" - )] - fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vrshlq_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82528,64 +35430,47 @@ pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i8" - )] - fn _vrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vrshl_u8(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i8" - )] - fn _vrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vrshl_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82595,29 +35480,21 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v16i8" - )] - fn _vrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vrshlq_u8(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82627,36 +35504,23 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v16i8" - )] - fn _vrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vrshlq_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82666,29 +35530,21 @@ pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i16" - )] - fn _vrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vrshl_u16(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82698,32 +35554,22 @@ pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i16" - )] - fn _vrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vrshl_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82733,29 +35579,21 @@ pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i16" - )] - fn _vrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vrshlq_u16(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82765,32 +35603,27 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i16" - )] - fn _vrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vrshlq_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82800,29 +35633,21 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i32" - )] - fn _vrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vrshl_u32(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82832,32 +35657,23 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i32" - )] - fn _vrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vrshl_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82867,29 +35683,21 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i32" - )] - fn _vrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vrshlq_u32(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82899,31 +35707,23 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i32" - )] - fn _vrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vrshlq_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82933,29 +35733,21 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v1i64" - )] - fn _vrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vrshl_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { + transmute(a) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82965,29 +35757,23 @@ pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i64" - )] - fn _vrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vrshlq_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -82997,34 +35783,22 @@ pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i64" - )] - fn _vrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vrshlq_u64(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83033,24 +35807,28 @@ pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - vrshl_s8(a, vdup_n_s8(-N as _)) +pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83059,26 +35837,22 @@ pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vrshl_s8(a, vdup_n_s8(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83087,24 +35861,24 @@ pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - vrshlq_s8(a, vdupq_n_s8(-N as _)) +pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83113,30 +35887,22 @@ pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vrshlq_s8(a, vdupq_n_s8(-N as _)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83145,24 +35911,24 @@ pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - vrshl_s16(a, vdup_n_s16(-N as _)) +pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83171,26 +35937,22 @@ pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = vrshl_s16(a, vdup_n_s16(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83199,24 +35961,24 @@ pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - vrshlq_s16(a, vdupq_n_s16(-N as _)) +pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83225,26 +35987,22 @@ pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vrshlq_s16(a, vdupq_n_s16(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83253,24 +36011,28 @@ pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - vrshl_s32(a, vdup_n_s32(-N as _)) +pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83279,26 +36041,22 @@ pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = vrshl_s32(a, vdup_n_s32(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83307,24 +36065,24 @@ pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - vrshlq_s32(a, vdupq_n_s32(-N as _)) +pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83333,25 +36091,22 @@ pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = vrshlq_s32(a, vdupq_n_s32(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83360,24 +36115,24 @@ pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - vrshl_s64(a, vdup_n_s64(-N as _)) +pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83386,24 +36141,22 @@ pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - vrshlq_s64(a, vdupq_n_s64(-N as _)) +pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { + transmute(a) } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83412,26 +36165,24 @@ pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = vrshlq_s64(a, vdupq_n_s64(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83440,24 +36191,22 @@ pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - vrshl_u8(a, vdup_n_s8(-N as _)) +pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83466,26 +36215,24 @@ pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = vrshl_u8(a, vdup_n_s8(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83494,24 +36241,22 @@ pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - vrshlq_u8(a, vdupq_n_s8(-N as _)) +pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83520,30 +36265,23 @@ pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = vrshlq_u8(a, vdupq_n_s8(-N as _)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83552,24 +36290,22 @@ pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - vrshl_u16(a, vdup_n_s16(-N as _)) +pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83578,26 +36314,24 @@ pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = vrshl_u16(a, vdup_n_s16(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83606,24 +36340,22 @@ pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - vrshlq_u16(a, vdupq_n_s16(-N as _)) +pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83632,26 +36364,24 @@ pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = vrshlq_u16(a, vdupq_n_s16(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83660,24 +36390,22 @@ pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - vrshl_u32(a, vdup_n_s32(-N as _)) +pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83686,26 +36414,24 @@ pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = vrshl_u32(a, vdup_n_s32(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83714,24 +36440,22 @@ pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - vrshlq_u32(a, vdupq_n_s32(-N as _)) +pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83740,25 +36464,23 @@ pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = vrshlq_u32(a, vdupq_n_s32(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83767,24 +36489,22 @@ pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - vrshl_u64(a, vdup_n_s64(-N as _)) +pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { + transmute(a) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83793,24 +36513,24 @@ pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - vrshlq_u64(a, vdupq_n_s64(-N as _)) +pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -83819,306 +36539,48 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = vrshlq_u64(a, vdupq_n_s64(-N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] - fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - _vrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] - fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] - fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - _vrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] - fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] - fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] - fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v8i8" - )] - fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - _vrshrn_n_s16(a, N) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v8i8" - )] - fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vrshrn_n_s16(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v4i16" - )] - fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - _vrshrn_n_s32(a, N) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v4i16" - )] - fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vrshrn_n_s32(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v2i32" - )] - fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - _vrshrn_n_s64(a, N) +pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { + transmute(a) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v2i32" - )] - fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = _vrshrn_n_s64(a, N); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84127,24 +36589,22 @@ pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - transmute(vrshrn_n_s16::(transmute(a))) +pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { + transmute(a) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84153,26 +36613,24 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = transmute(vrshrn_n_s16::(transmute(a))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84181,24 +36639,22 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - transmute(vrshrn_n_s32::(transmute(a))) +pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { + transmute(a) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84207,26 +36663,24 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = transmute(vrshrn_n_s32::(transmute(a))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84235,24 +36689,22 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - transmute(vrshrn_n_s64::(transmute(a))) +pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { + transmute(a) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84261,24 +36713,23 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = transmute(vrshrn_n_s64::(transmute(a))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84288,29 +36739,21 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f32" - )] - fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; - } - _vrsqrte_f32(a) +pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { + transmute(a) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84320,31 +36763,23 @@ pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f32" - )] - fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: float32x2_t = _vrsqrte_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84354,29 +36789,21 @@ pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v4f32" - )] - fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; - } - _vrsqrteq_f32(a) +pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { + transmute(a) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84386,31 +36813,27 @@ pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v4f32" - )] - fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrsqrteq_f32(a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84420,29 +36843,21 @@ pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v2i32" - )] - fn _vrsqrte_u32(a: int32x2_t) -> int32x2_t; - } - _vrsqrte_u32(a.as_signed()).as_unsigned() +pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { + transmute(a) } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84452,31 +36867,23 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v2i32" - )] - fn _vrsqrte_u32(a: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = _vrsqrte_u32(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84486,29 +36893,21 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v4i32" - )] - fn _vrsqrteq_u32(a: int32x4_t) -> int32x4_t; - } - _vrsqrteq_u32(a.as_signed()).as_unsigned() +pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { + transmute(a) } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84518,31 +36917,23 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v4i32" - )] - fn _vrsqrteq_u32(a: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vrsqrteq_u32(a.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84552,29 +36943,21 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v2f32" - )] - fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - _vrsqrts_f32(a, b) +pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { + transmute(a) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84584,32 +36967,23 @@ pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v2f32" - )] - fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = _vrsqrts_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84619,29 +36993,21 @@ pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v4f32" - )] - fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - _vrsqrtsq_f32(a, b) +pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { + transmute(a) } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -84651,34 +37017,28 @@ pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v4f32" - )] - fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = _vrsqrtsq_f32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84687,24 +37047,22 @@ pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshr_n_s8::(b)) +pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84713,27 +37071,24 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_add(a, vrshr_n_s8::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84742,24 +37097,22 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshrq_n_s8::(b)) +pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84768,31 +37121,24 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_add(a, vrshrq_n_s8::(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84801,24 +37147,22 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshr_n_s16::(b)) +pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84827,27 +37171,24 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_add(a, vrshr_n_s16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84856,24 +37197,22 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshrq_n_s16::(b)) +pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84882,27 +37221,24 @@ pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_add(a, vrshrq_n_s16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84911,24 +37247,47 @@ pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshr_n_s32::(b)) +pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84937,27 +37296,22 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_add(a, vrshr_n_s32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84966,24 +37320,24 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshrq_n_s32::(b)) +pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -84992,26 +37346,22 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_add(a, vrshrq_n_s32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85020,24 +37370,24 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshr_n_s64::(b)) +pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85046,24 +37396,22 @@ pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshrq_n_s64::(b)) +pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { + transmute(a) } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85072,27 +37420,24 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_add(a, vrshrq_n_s64::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85101,24 +37446,22 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshr_n_u8::(b)) +pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85127,27 +37470,23 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_add(a, vrshr_n_u8::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85156,24 +37495,22 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vrshrq_n_u8::(b)) +pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85182,31 +37519,24 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_add(a, vrshrq_n_u8::(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85215,24 +37545,22 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshr_n_u16::(b)) +pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85241,27 +37569,24 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_add(a, vrshr_n_u16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85270,24 +37595,22 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vrshrq_n_u16::(b)) +pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85296,27 +37619,24 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_add(a, vrshrq_n_u16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85325,24 +37645,22 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshr_n_u32::(b)) +pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85351,27 +37669,28 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_add(a, vrshr_n_u32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85380,24 +37699,22 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vrshrq_n_u32::(b)) +pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85406,26 +37723,24 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_add(a, vrshrq_n_u32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85434,24 +37749,48 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshr_n_u64::(b)) +pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85460,24 +37799,22 @@ pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vrshrq_n_u64::(b)) +pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { + transmute(a) } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85486,25 +37823,27 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_add(a, vrshrq_n_u64::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85514,29 +37853,21 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v8i8" - )] - fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - } - _vrsubhn_s16(a, b) +pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { + transmute(a) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85546,32 +37877,23 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v8i8" - )] - fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vrsubhn_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85581,29 +37903,21 @@ pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v4i16" - )] - fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - } - _vrsubhn_s32(a, b) +pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { + transmute(a) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85613,32 +37927,23 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v4i16" - )] - fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vrsubhn_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85648,29 +37953,21 @@ pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v2i32" - )] - fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - } - _vrsubhn_s64(a, b) +pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { + transmute(a) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85680,32 +37977,23 @@ pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v2i32" - )] - fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vrsubhn_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85715,21 +38003,21 @@ pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - transmute(vrsubhn_s16(transmute(a), transmute(b))) +pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { + transmute(a) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85739,24 +38027,27 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vrsubhn_s16(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85766,21 +38057,21 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - transmute(vrsubhn_s32(transmute(a), transmute(b))) +pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { + transmute(a) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85790,24 +38081,23 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(vrsubhn_s32(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85817,21 +38107,21 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - transmute(vrsubhn_s64(transmute(a), transmute(b))) +pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { + transmute(a) } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -85841,26 +38131,24 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); - let ret_val: uint32x2_t = transmute(vrsubhn_s64(transmute(a), transmute(b))); +pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85869,24 +38157,22 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85895,26 +38181,24 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85923,24 +38207,22 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85949,26 +38231,24 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -85977,24 +38257,22 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86003,26 +38281,23 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86031,24 +38306,22 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86057,30 +38330,24 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(LANE, 4); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86089,24 +38356,22 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86115,26 +38380,24 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86143,24 +38406,22 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86169,26 +38430,24 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86197,24 +38456,22 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86223,26 +38480,23 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86251,24 +38505,22 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86277,26 +38529,24 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86305,24 +38555,22 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86331,26 +38579,24 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86359,24 +38605,22 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86385,26 +38629,24 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86413,24 +38655,22 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86439,30 +38679,28 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(LANE, 4); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_insert!(b, LANE as u32, a); +pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86471,24 +38709,22 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86497,26 +38733,24 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86525,24 +38759,22 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86551,26 +38783,24 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86579,24 +38809,22 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86605,26 +38833,28 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86633,24 +38863,22 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86659,26 +38887,24 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86687,24 +38913,22 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86713,26 +38937,24 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86741,24 +38963,22 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86767,26 +38987,24 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86795,24 +39013,22 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86821,30 +39037,28 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(LANE, 4); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = simd_insert!(b, LANE as u32, a); +pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86853,24 +39067,22 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86879,26 +39091,24 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(LANE, 2); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86907,24 +39117,22 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86933,25 +39141,23 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(LANE, 3); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86960,23 +39166,22 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -86985,23 +39190,23 @@ pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -87010,24 +39215,22 @@ pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { - static_assert!(LANE == 0); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { + transmute(a) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -87036,24 +39239,23 @@ pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(b, LANE as u32, a) +pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -87062,875 +39264,673 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(LANE, 1); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: poly64x2_t = simd_insert!(b, LANE as u32, a); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { + transmute(a) } -#[doc = "SHA1 hash update accelerator, choose."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1c))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1c" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] - fn _vsha1cq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; - } - _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() -} -#[doc = "SHA1 hash update accelerator, choose."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1c))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1c" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] - fn _vsha1cq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; - } - let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); - let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "SHA1 fixed rotate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1h))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")] - fn _vsha1h_u32(hash_e: i32) -> i32; - } - _vsha1h_u32(hash_e.as_signed()).as_unsigned() -} -#[doc = "SHA1 hash update accelerator, majority"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1m))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1m" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] - fn _vsha1mq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; - } - _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() +pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { + transmute(a) } -#[doc = "SHA1 hash update accelerator, majority"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1m))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1m" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] - fn _vsha1mq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; - } - let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); - let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "SHA1 hash update accelerator, parity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1p))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1p" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] - fn _vsha1pq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; - } - _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() +pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "SHA1 hash update accelerator, parity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1p))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1p" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] - fn _vsha1pq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; - } - let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); - let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "SHA1 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su0))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] - fn _vsha1su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t, w8_11: int32x4_t) -> int32x4_t; - } - _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned() +pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { + transmute(a) } -#[doc = "SHA1 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] - fn _vsha1su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t, w8_11: int32x4_t) -> int32x4_t; - } - let w0_3: uint32x4_t = simd_shuffle!(w0_3, w0_3, [0, 1, 2, 3]); - let w4_7: uint32x4_t = simd_shuffle!(w4_7, w4_7, [0, 1, 2, 3]); - let w8_11: uint32x4_t = simd_shuffle!(w8_11, w8_11, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "SHA1 schedule update accelerator, second part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su1" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] - fn _vsha1su1q_u32(tw0_3: int32x4_t, w12_15: int32x4_t) -> int32x4_t; - } - _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned() -} -#[doc = "SHA1 schedule update accelerator, second part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su1))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su1" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] - fn _vsha1su1q_u32(tw0_3: int32x4_t, w12_15: int32x4_t) -> int32x4_t; - } - let tw0_3: uint32x4_t = simd_shuffle!(tw0_3, tw0_3, [0, 1, 2, 3]); - let w12_15: uint32x4_t = simd_shuffle!(w12_15, w12_15, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { + transmute(a) } -#[doc = "SHA1 schedule update accelerator, upper part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h2))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256h2q_u32( - hash_abcd: uint32x4_t, - hash_efgh: uint32x4_t, - wk: uint32x4_t, -) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h2" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] - fn _vsha256h2q_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; - } - _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "SHA1 schedule update accelerator, upper part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h2))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256h2q_u32( - hash_abcd: uint32x4_t, - hash_efgh: uint32x4_t, - wk: uint32x4_t, -) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h2" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] - fn _vsha256h2q_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; - } - let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); - let hash_efgh: uint32x4_t = simd_shuffle!(hash_efgh, hash_efgh, [0, 1, 2, 3]); - let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { + transmute(a) } -#[doc = "SHA1 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256hq_u32( - hash_abcd: uint32x4_t, - hash_efgh: uint32x4_t, - wk: uint32x4_t, -) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] - fn _vsha256hq_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; - } - _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { + transmute(a) } -#[doc = "SHA1 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256hq_u32( - hash_abcd: uint32x4_t, - hash_efgh: uint32x4_t, - wk: uint32x4_t, -) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] - fn _vsha256hq_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; - } - let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [0, 1, 2, 3]); - let hash_efgh: uint32x4_t = simd_shuffle!(hash_efgh, hash_efgh, [0, 1, 2, 3]); - let wk: uint32x4_t = simd_shuffle!(wk, wk, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "SHA256 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] - fn _vsha256su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t) -> int32x4_t; - } - _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned() +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { + transmute(a) } -#[doc = "SHA256 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] - fn _vsha256su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t) -> int32x4_t; - } - let w0_3: uint32x4_t = simd_shuffle!(w0_3, w0_3, [0, 1, 2, 3]); - let w4_7: uint32x4_t = simd_shuffle!(w4_7, w4_7, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "SHA256 schedule update accelerator, second part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256su1q_u32( - tw0_3: uint32x4_t, - w8_11: uint32x4_t, - w12_15: uint32x4_t, -) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su1" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] - fn _vsha256su1q_u32(tw0_3: int32x4_t, w8_11: int32x4_t, w12_15: int32x4_t) -> int32x4_t; - } - _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned() +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { + transmute(a) } -#[doc = "SHA256 schedule update accelerator, second part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub unsafe fn vsha256su1q_u32( - tw0_3: uint32x4_t, - w8_11: uint32x4_t, - w12_15: uint32x4_t, -) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su1" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] - fn _vsha256su1q_u32(tw0_3: int32x4_t, w8_11: int32x4_t, w12_15: int32x4_t) -> int32x4_t; - } - let tw0_3: uint32x4_t = simd_shuffle!(tw0_3, tw0_3, [0, 1, 2, 3]); - let w8_11: uint32x4_t = simd_shuffle!(w8_11, w8_11, [0, 1, 2, 3]); - let w12_15: uint32x4_t = simd_shuffle!(w12_15, w12_15, [0, 1, 2, 3]); - let ret_val: uint32x4_t = - _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] - fn _vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - _vshiftins_v16i8(a, b, c) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] - fn _vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vshiftins_v16i8(a, b, c); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v1i64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] - fn _vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; - } - _vshiftins_v1i64(a, b, c) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] - fn _vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - _vshiftins_v2i32(a, b, c) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] - fn _vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int32x2_t = _vshiftins_v2i32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] - fn _vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - _vshiftins_v2i64(a, b, c) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] - fn _vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); - let ret_val: int64x2_t = _vshiftins_v2i64(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] - fn _vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - _vshiftins_v4i16(a, b, c) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] - fn _vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vshiftins_v4i16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] - fn _vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - _vshiftins_v4i32(a, b, c) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] - fn _vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vshiftins_v4i32(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] - fn _vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - _vshiftins_v8i16(a, b, c) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] - fn _vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vshiftins_v8i16(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] - fn _vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - } - _vshiftins_v8i8(a, b, c) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] - fn _vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vshiftins_v8i8(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -87939,24 +39939,22 @@ unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdup_n_s8(N as _)) +pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -87965,26 +39963,24 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_shl(a, vdup_n_s8(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -87993,24 +39989,22 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdupq_n_s8(N as _)) +pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88019,30 +40013,28 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_shl(a, vdupq_n_s8(N as _)); +pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88051,24 +40043,22 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdup_n_s16(N as _)) +pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88077,26 +40067,24 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_shl(a, vdup_n_s16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88105,24 +40093,22 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdupq_n_s16(N as _)) +pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88131,26 +40117,24 @@ pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shl(a, vdupq_n_s16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88159,24 +40143,22 @@ pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdup_n_s32(N as _)) +pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88185,26 +40167,24 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 5); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = simd_shl(a, vdup_n_s32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88213,24 +40193,22 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdupq_n_s32(N as _)) +pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88239,25 +40217,24 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 5); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shl(a, vdupq_n_s32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88266,24 +40243,48 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdup_n_s64(N as _)) +pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88292,24 +40293,22 @@ pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdupq_n_s64(N as _)) +pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88318,26 +40317,23 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 6); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = simd_shl(a, vdupq_n_s64(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88346,24 +40342,22 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdup_n_u8(N as _)) +pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88372,26 +40366,24 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_shl(a, vdup_n_u8(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88400,24 +40392,22 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - simd_shl(a, vdupq_n_u8(N as _)) +pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88426,30 +40416,24 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_shl(a, vdupq_n_u8(N as _)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88458,24 +40442,22 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdup_n_u16(N as _)) +pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88484,26 +40466,47 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_shl(a, vdup_n_u16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88512,24 +40515,24 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - simd_shl(a, vdupq_n_u16(N as _)) +pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88538,26 +40541,22 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shl(a, vdupq_n_u16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88566,24 +40565,24 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdup_n_u32(N as _)) +pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88592,26 +40591,22 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = simd_shl(a, vdup_n_u32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88620,24 +40615,24 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - simd_shl(a, vdupq_n_u32(N as _)) +pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88646,25 +40641,22 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shl(a, vdupq_n_u32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88673,24 +40665,28 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdup_n_u64(N as _)) +pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88699,24 +40695,22 @@ pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - simd_shl(a, vdupq_n_u64(N as _)) +pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { + transmute(a) } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -88725,24 +40719,23 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = simd_shl(a, vdupq_n_u64(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88752,29 +40745,21 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v8i8" - )] - fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vshl_s8(a, b) +pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { + transmute(a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88784,32 +40769,23 @@ pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v8i8" - )] - fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vshl_s8(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88819,29 +40795,21 @@ pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v16i8" - )] - fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vshlq_s8(a, b) +pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { + transmute(a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88851,36 +40819,23 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v16i8" - )] - fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = _vshlq_s8(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88890,29 +40845,21 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v4i16" - )] - fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vshl_s16(a, b) +pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { + transmute(a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88922,32 +40869,23 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v4i16" - )] - fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = _vshl_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88957,29 +40895,21 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v8i16" - )] - fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vshlq_s16(a, b) +pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { + transmute(a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -88989,32 +40919,23 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v8i16" - )] - fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = _vshlq_s16(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89024,29 +40945,21 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i32" - )] - fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vshl_s32(a, b) +pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { + transmute(a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89056,32 +40969,23 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i32" - )] - fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = _vshl_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89091,29 +40995,21 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v4i32" - )] - fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vshlq_s32(a, b) +pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { + transmute(a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89123,31 +41019,27 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v4i32" - )] - fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = _vshlq_s32(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89157,29 +41049,21 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v1i64" - )] - fn _vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vshl_s64(a, b) +pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { + transmute(a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89189,29 +41073,23 @@ pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i64" - )] - fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vshlq_s64(a, b) +pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89221,32 +41099,21 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i64" - )] - fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = _vshlq_s64(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89256,29 +41123,23 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i8" - )] - fn _vshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - _vshl_u8(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89288,32 +41149,21 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i8" - )] - fn _vshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = _vshl_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89323,29 +41173,23 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v16i8" - )] - fn _vshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - _vshlq_u8(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89355,36 +41199,21 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v16i8" - )] - fn _vshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = _vshlq_u8(a.as_signed(), b).as_unsigned(); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89394,29 +41223,23 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i16" - )] - fn _vshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - _vshl_u16(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89426,32 +41249,21 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i16" - )] - fn _vshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = _vshl_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89461,29 +41273,23 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i16" - )] - fn _vshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - _vshlq_u16(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89493,32 +41299,21 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i16" - )] - fn _vshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = _vshlq_u16(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89528,29 +41323,22 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i32" - )] - fn _vshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - _vshl_u32(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89560,32 +41348,21 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i32" - )] - fn _vshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = _vshl_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89595,29 +41372,23 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i32" - )] - fn _vshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - _vshlq_u32(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89627,31 +41398,21 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i32" - )] - fn _vshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = _vshlq_u32(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89661,29 +41422,23 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v1i64" - )] - fn _vshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - _vshl_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89693,29 +41448,21 @@ pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i64" - )] - fn _vshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - _vshlq_u64(a.as_signed(), b).as_unsigned() +pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { + transmute(a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -89725,34 +41472,23 @@ pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i64" - )] - fn _vshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = _vshlq_u64(a.as_signed(), b).as_unsigned(); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89761,24 +41497,22 @@ pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 16); - simd_shl(simd_cast(a), vdupq_n_s32(N as _)) +pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { + transmute(a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89787,26 +41521,24 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_shl(simd_cast(a), vdupq_n_s32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89815,24 +41547,22 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 32); - simd_shl(simd_cast(a), vdupq_n_s64(N as _)) +pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { + transmute(a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89841,26 +41571,24 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int64x2_t = simd_shl(simd_cast(a), vdupq_n_s64(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89869,24 +41597,22 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { - static_assert!(N >= 0 && N <= 8); - simd_shl(simd_cast(a), vdupq_n_s16(N as _)) +pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { + transmute(a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89895,26 +41621,24 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { - static_assert!(N >= 0 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_shl(simd_cast(a), vdupq_n_s16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89923,24 +41647,22 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 16); - simd_shl(simd_cast(a), vdupq_n_u32(N as _)) +pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { + transmute(a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89949,26 +41671,28 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_shl(simd_cast(a), vdupq_n_u32(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -89977,24 +41701,22 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 32); - simd_shl(simd_cast(a), vdupq_n_u64(N as _)) +pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { + transmute(a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90003,26 +41725,24 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint64x2_t = simd_shl(simd_cast(a), vdupq_n_u64(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90031,24 +41751,22 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { - static_assert!(N >= 0 && N <= 8); - simd_shl(simd_cast(a), vdupq_n_u16(N as _)) +pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { + transmute(a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90057,26 +41775,24 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { - static_assert!(N >= 0 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_shl(simd_cast(a), vdupq_n_u16(N as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90085,25 +41801,22 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { 7 } else { N }; - simd_shr(a, vdup_n_s8(n as _)) +pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90112,27 +41825,24 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let n: i32 = if N == 8 { 7 } else { N }; - let ret_val: int8x8_t = simd_shr(a, vdup_n_s8(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90141,25 +41851,22 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { 7 } else { N }; - simd_shr(a, vdupq_n_s8(n as _)) +pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90168,31 +41875,28 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let n: i32 = if N == 8 { 7 } else { N }; - let ret_val: int8x16_t = simd_shr(a, vdupq_n_s8(n as _)); +pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90201,25 +41905,22 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { 15 } else { N }; - simd_shr(a, vdup_n_s16(n as _)) +pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90228,27 +41929,24 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let n: i32 = if N == 16 { 15 } else { N }; - let ret_val: int16x4_t = simd_shr(a, vdup_n_s16(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90257,25 +41955,22 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { 15 } else { N }; - simd_shr(a, vdupq_n_s16(n as _)) +pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90284,27 +41979,24 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let n: i32 = if N == 16 { 15 } else { N }; - let ret_val: int16x8_t = simd_shr(a, vdupq_n_s16(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90313,25 +42005,22 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { 31 } else { N }; - simd_shr(a, vdup_n_s32(n as _)) +pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90340,27 +42029,28 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let n: i32 = if N == 32 { 31 } else { N }; - let ret_val: int32x2_t = simd_shr(a, vdup_n_s32(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90369,25 +42059,22 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { 31 } else { N }; - simd_shr(a, vdupq_n_s32(n as _)) +pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90396,26 +42083,24 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let n: i32 = if N == 32 { 31 } else { N }; - let ret_val: int32x4_t = simd_shr(a, vdupq_n_s32(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90424,25 +42109,48 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { 63 } else { N }; - simd_shr(a, vdup_n_s64(n as _)) +pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90451,25 +42159,22 @@ pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { 63 } else { N }; - simd_shr(a, vdupq_n_s64(n as _)) +pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90478,27 +42183,24 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let n: i32 = if N == 64 { 63 } else { N }; - let ret_val: int64x2_t = simd_shr(a, vdupq_n_s64(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90507,29 +42209,22 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { - return vdup_n_u8(0); - } else { - N - }; - simd_shr(a, vdup_n_u8(n as _)) +pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90538,31 +42233,24 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let n: i32 = if N == 8 { - return vdup_n_u8(0); - } else { - N - }; - let ret_val: uint8x8_t = simd_shr(a, vdup_n_u8(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90571,29 +42259,22 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { - return vdupq_n_u8(0); - } else { - N - }; - simd_shr(a, vdupq_n_u8(n as _)) +pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90602,35 +42283,24 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let n: i32 = if N == 8 { - return vdupq_n_u8(0); - } else { - N - }; - let ret_val: uint8x16_t = simd_shr(a, vdupq_n_u8(n as _)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90639,29 +42309,22 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { - return vdup_n_u16(0); - } else { - N - }; - simd_shr(a, vdup_n_u16(n as _)) +pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90670,31 +42333,23 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let n: i32 = if N == 16 { - return vdup_n_u16(0); - } else { - N - }; - let ret_val: uint16x4_t = simd_shr(a, vdup_n_u16(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90703,29 +42358,22 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { - return vdupq_n_u16(0); - } else { - N - }; - simd_shr(a, vdupq_n_u16(n as _)) +pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90734,31 +42382,24 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let n: i32 = if N == 16 { - return vdupq_n_u16(0); - } else { - N - }; - let ret_val: uint16x8_t = simd_shr(a, vdupq_n_u16(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90767,29 +42408,22 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { - return vdup_n_u32(0); - } else { - N - }; - simd_shr(a, vdup_n_u32(n as _)) +pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90798,31 +42432,24 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let n: i32 = if N == 32 { - return vdup_n_u32(0); - } else { - N - }; - let ret_val: uint32x2_t = simd_shr(a, vdup_n_u32(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90831,29 +42458,22 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { - return vdupq_n_u32(0); - } else { - N - }; - simd_shr(a, vdupq_n_u32(n as _)) +pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90862,30 +42482,23 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let n: i32 = if N == 32 { - return vdupq_n_u32(0); - } else { - N - }; - let ret_val: uint32x4_t = simd_shr(a, vdupq_n_u32(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90894,29 +42507,22 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { - return vdup_n_u64(0); - } else { - N - }; - simd_shr(a, vdup_n_u64(n as _)) +pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90925,29 +42531,48 @@ pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { - return vdupq_n_u64(0); - } else { - N - }; - simd_shr(a, vdupq_n_u64(n as _)) +pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { + transmute(a) } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90956,31 +42581,24 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let n: i32 = if N == 64 { - return vdupq_n_u64(0); - } else { - N - }; - let ret_val: uint64x2_t = simd_shr(a, vdupq_n_u64(n as _)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -90989,24 +42607,22 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_cast(simd_shr(a, vdupq_n_s16(N as _))) +pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { + transmute(a) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91015,26 +42631,24 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_cast(simd_shr(a, vdupq_n_s16(N as _))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91043,24 +42657,22 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_cast(simd_shr(a, vdupq_n_s32(N as _))) +pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { + transmute(a) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91069,26 +42681,28 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_cast(simd_shr(a, vdupq_n_s32(N as _))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91097,24 +42711,22 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_cast(simd_shr(a, vdupq_n_s64(N as _))) +pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { + transmute(a) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91123,26 +42735,24 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: int32x2_t = simd_cast(simd_shr(a, vdupq_n_s64(N as _))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91151,24 +42761,22 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_cast(simd_shr(a, vdupq_n_u16(N as _))) +pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { + transmute(a) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91177,26 +42785,24 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_cast(simd_shr(a, vdupq_n_u16(N as _))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91205,24 +42811,22 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_cast(simd_shr(a, vdupq_n_u32(N as _))) +pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { + transmute(a) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91231,26 +42835,24 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_cast(simd_shr(a, vdupq_n_u32(N as _))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91259,24 +42861,22 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_cast(simd_shr(a, vdupq_n_u64(N as _))) +pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { + transmute(a) } -#[doc = "Shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shrn, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -91285,752 +42885,549 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let ret_val: uint32x2_t = simd_cast(simd_shr(a, vdupq_n_u64(N as _))); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - vshiftins_v8i8(a, b, int8x8_t::splat(N as i8)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vshiftins_v8i8(a, b, int8x8_t::splat(N as i8)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - vshiftins_v16i8(a, b, int8x16_t::splat(N as i8)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vshiftins_v16i8(a, b, int8x16_t::splat(N as i8)); +pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - vshiftins_v4i16(a, b, int16x4_t::splat(N as i16)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = vshiftins_v4i16(a, b, int16x4_t::splat(N as i16)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - vshiftins_v8i16(a, b, int16x8_t::splat(N as i16)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vshiftins_v8i16(a, b, int16x8_t::splat(N as i16)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); - vshiftins_v2i32(a, b, int32x2_t::splat(N)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = vshiftins_v2i32(a, b, int32x2_t::splat(N)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); - vshiftins_v4i32(a, b, int32x4_t::splat(N)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vshiftins_v4i32(a, b, int32x4_t::splat(N)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 0 && N <= 63); - vshiftins_v1i64(a, b, int64x1_t::splat(N as i64)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 63); - vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 63); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(N as i8), - )) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(N as i8), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(N as i8), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(N as i8), - )); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(N as i16), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(N as i16), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 0 && N <= 31); - transmute(vshiftins_v2i32( - transmute(a), - transmute(b), - int32x2_t::splat(N as i32), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 0 && N <= 31); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = transmute(vshiftins_v2i32( - transmute(a), - transmute(b), - int32x2_t::splat(N as i32), - )); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 31); - transmute(vshiftins_v4i32( - transmute(a), - transmute(b), - int32x4_t::splat(N as i32), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 31); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = transmute(vshiftins_v4i32( - transmute(a), - transmute(b), - int32x4_t::splat(N as i32), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 0 && N <= 63); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(N as i64), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 63); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(N as i64), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 63); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(N as i64), - )); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(N as i8), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(N as i8), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(N as i8), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(N as i8), - )); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 4); - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(N as i16), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 4); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 4); - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(N as i16), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { + transmute(a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 4); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92039,24 +43436,22 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshr_n_s8::(b)) +pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92065,27 +43460,23 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_add(a, vshr_n_s8::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92094,24 +43485,22 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshrq_n_s8::(b)) +pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92120,31 +43509,23 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_add(a, vshrq_n_s8::(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92153,24 +43534,22 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshr_n_s16::(b)) +pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92179,27 +43558,23 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_add(a, vshr_n_s16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92208,24 +43583,22 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshrq_n_s16::(b)) +pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92234,27 +43607,24 @@ pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_add(a, vshrq_n_s16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92263,24 +43633,22 @@ pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshr_n_s32::(b)) +pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92289,27 +43657,28 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_add(a, vshr_n_s32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92318,24 +43687,22 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshrq_n_s32::(b)) +pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92344,26 +43711,24 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_add(a, vshrq_n_s32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92372,24 +43737,48 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshr_n_s64::(b)) +pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92398,24 +43787,22 @@ pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshrq_n_s64::(b)) +pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { + transmute(a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92424,27 +43811,24 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_add(a, vshrq_n_s64::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92453,24 +43837,22 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshr_n_u8::(b)) +pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { + transmute(a) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92479,27 +43861,28 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_add(a, vshr_n_u8::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92508,24 +43891,22 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - simd_add(a, vshrq_n_u8::(b)) +pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { + transmute(a) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92534,31 +43915,74 @@ pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_add(a, vshrq_n_u8::(b)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92567,24 +43991,22 @@ pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshr_n_u16::(b)) +pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { + transmute(a) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92593,27 +44015,28 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_add(a, vshr_n_u16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92622,24 +44045,22 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - simd_add(a, vshrq_n_u16::(b)) +pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { + transmute(a) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92648,27 +44069,24 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_add(a, vshrq_n_u16::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92677,24 +44095,22 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshr_n_u32::(b)) +pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { + transmute(a) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92703,27 +44119,24 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_add(a, vshr_n_u32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92732,24 +44145,22 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - simd_add(a, vshrq_n_u32::(b)) +pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { + transmute(a) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92758,26 +44169,24 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_add(a, vshrq_n_u32::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92786,24 +44195,22 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshr_n_u64::(b)) +pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { + transmute(a) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92812,24 +44219,24 @@ pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - simd_add(a, vshrq_n_u64::(b)) +pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -92838,2061 +44245,2166 @@ pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_add(a, vshrq_n_u64::(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(1 <= N && N <= 8); - vshiftins_v8i8(a, b, int8x8_t::splat(-N as i8)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(1 <= N && N <= 8); - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vshiftins_v8i8(a, b, int8x8_t::splat(-N as i8)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(1 <= N && N <= 8); - vshiftins_v16i8(a, b, int8x16_t::splat(-N as i8)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(1 <= N && N <= 8); - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = vshiftins_v16i8(a, b, int8x16_t::splat(-N as i8)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(1 <= N && N <= 16); - vshiftins_v4i16(a, b, int16x4_t::splat(-N as i16)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(1 <= N && N <= 16); - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = vshiftins_v4i16(a, b, int16x4_t::splat(-N as i16)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(1 <= N && N <= 16); - vshiftins_v8i16(a, b, int16x8_t::splat(-N as i16)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(1 <= N && N <= 16); - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = vshiftins_v8i16(a, b, int16x8_t::splat(-N as i16)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(1 <= N && N <= 32); - vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(1 <= N && N <= 32); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(1 <= N && N <= 32); - vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(1 <= N && N <= 32); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(1 <= N && N <= 64); - vshiftins_v1i64(a, b, int64x1_t::splat(-N as i64)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(1 <= N && N <= 64); - vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(1 <= N && N <= 64); - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(1 <= N && N <= 8); - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(-N as i8), - )) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(1 <= N && N <= 8); - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(-N as i8), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(1 <= N && N <= 8); - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(-N as i8), - )) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(1 <= N && N <= 8); - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(-N as i8), - )); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(1 <= N && N <= 16); - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(-N as i16), - )) +pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(1 <= N && N <= 16); - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(-N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(1 <= N && N <= 16); - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(-N as i16), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(1 <= N && N <= 16); - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(-N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(1 <= N && N <= 32); - transmute(vshiftins_v2i32( - transmute(a), - transmute(b), - int32x2_t::splat(-N), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(1 <= N && N <= 32); - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = transmute(vshiftins_v2i32( - transmute(a), - transmute(b), - int32x2_t::splat(-N), - )); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(1 <= N && N <= 32); - transmute(vshiftins_v4i32( - transmute(a), - transmute(b), - int32x4_t::splat(-N), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(1 <= N && N <= 32); - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = transmute(vshiftins_v4i32( - transmute(a), - transmute(b), - int32x4_t::splat(-N), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v1i64( - transmute(a), - transmute(b), - int64x1_t::splat(-N as i64), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(1 <= N && N <= 64); - transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(-N as i64), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(1 <= N && N <= 64); - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = transmute(vshiftins_v2i64( - transmute(a), - transmute(b), - int64x2_t::splat(-N as i64), - )); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(1 <= N && N <= 8); - transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(-N as i8), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(1 <= N && N <= 8); - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly8x8_t = transmute(vshiftins_v8i8( - transmute(a), - transmute(b), - int8x8_t::splat(-N as i8), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(1 <= N && N <= 8); - transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(-N as i8), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(1 <= N && N <= 8); - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: poly8x16_t = transmute(vshiftins_v16i8( - transmute(a), - transmute(b), - int8x16_t::splat(-N as i8), - )); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(1 <= N && N <= 16); - transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(-N as i16), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(1 <= N && N <= 16); - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: poly16x4_t = transmute(vshiftins_v4i16( - transmute(a), - transmute(b), - int16x4_t::splat(-N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(1 <= N && N <= 16); - transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(-N as i16), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { + transmute(a) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(1 <= N && N <= 16); - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: poly16x8_t = transmute(vshiftins_v8i16( - transmute(a), - transmute(b), - int16x8_t::splat(-N as i16), - )); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { - vst1_v2f32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - vst1_v2f32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { - vst1q_v4f32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - vst1q_v4f32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { - vst1_v8i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_v8i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { - vst1q_v16i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - vst1q_v16i8(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { - vst1_v4i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - vst1_v4i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { - vst1q_v8i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_v8i16(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { - vst1_v2i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - vst1_v2i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { - vst1q_v4i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - vst1q_v4i32(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { - vst1_v1i64(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { - vst1q_v2i64(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - vst1q_v2i64(ptr as *const i8, a, crate::mem::align_of::() as i32) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { - vst1_v8i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_v8i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { - vst1q_v16i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - vst1q_v16i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { - vst1_v4i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - vst1_v4i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { - vst1q_v8i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_v8i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { - vst1_v2i32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - vst1_v2i32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { - vst1q_v4i32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - vst1q_v4i32( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { - vst1_v1i64( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { - vst1q_v2i64( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - vst1q_v2i64( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { - vst1_v8i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_v8i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { - vst1q_v16i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - vst1q_v16i8( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { - vst1_v4i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - vst1_v4i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { - vst1q_v8i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_v8i16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { - vst1_v1i64( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { - vst1q_v2i64( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { - let a: poly64x2_t = simd_shuffle!(a, a, [0, 1]); - vst1q_v2i64( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] - fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); - } - _vst1_f32_x2(a, b.0, b.1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] - fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); - } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst1_f32_x2(a, b.0, b.1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] - fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); - } - _vst1q_f32_x2(a, b.0, b.1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] - fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); - } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst1q_f32_x2(a, b.0, b.1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" - )] - fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32); - } - _vst1_f32_x2(b.0, b.1, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" - )] - fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32); - } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst1_f32_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" - )] - fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32); - } - _vst1q_f32_x2(b.0, b.1, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" - )] - fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32); - } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst1q_f32_x2(b.0, b.1, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v2f32.p0")] - fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); - } - _vst1_f32_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v2f32.p0")] - fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst1_f32_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v4f32.p0")] - fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); - } - _vst1q_f32_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v4f32.p0")] - fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst1q_f32_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" - )] - fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32); - } - _vst1_f32_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" - )] - fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32); - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst1_f32_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" - )] - fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32); - } - _vst1q_f32_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" - )] - fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32); - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst1q_f32_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32.p0")] - fn _vst1_f32_x4( - ptr: *mut f32, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - ); - } - _vst1_f32_x4(a, b.0, b.1, b.2, b.3) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32.p0")] - fn _vst1_f32_x4( - ptr: *mut f32, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - ); - } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst1_f32_x4(a, b.0, b.1, b.2, b.3) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32.p0")] - fn _vst1q_f32_x4( - ptr: *mut f32, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - ); - } - _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32.p0")] - fn _vst1q_f32_x4( - ptr: *mut f32, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - ); - } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { + let ret_val: int64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" - )] - fn _vst1_f32_x4( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - ptr: *mut f32, - ); - } - _vst1_f32_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" - )] - fn _vst1_f32_x4( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - ptr: *mut f32, - ); - } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst1_f32_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" - )] - fn _vst1q_f32_x4( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - ptr: *mut f32, - ); - } - _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" - )] - fn _vst1q_f32_x4( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - ptr: *mut f32, - ); - } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -94901,24 +46413,22 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -94927,25 +46437,23 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { - static_assert_uimm_bits!(LANE, 1); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -94954,24 +46462,22 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -94980,25 +46486,23 @@ pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { - static_assert_uimm_bits!(LANE, 2); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { + let ret_val: uint64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95007,24 +46511,22 @@ pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95033,25 +46535,27 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { - static_assert_uimm_bits!(LANE, 3); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95060,24 +46564,22 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95086,25 +46588,23 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { - static_assert_uimm_bits!(LANE, 4); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95113,24 +46613,22 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95139,25 +46637,23 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { - static_assert_uimm_bits!(LANE, 2); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95166,24 +46662,22 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95192,25 +46686,23 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { - static_assert_uimm_bits!(LANE, 3); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95219,24 +46711,22 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95245,25 +46735,23 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { - static_assert_uimm_bits!(LANE, 1); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95272,24 +46760,22 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95298,25 +46784,24 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { - static_assert_uimm_bits!(LANE, 2); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95325,24 +46810,22 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95351,25 +46834,23 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { - static_assert_uimm_bits!(LANE, 1); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95378,51 +46859,47 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { - static_assert_uimm_bits!(LANE, 3); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - *a = simd_extract!(b, LANE as u32); + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95431,24 +46908,22 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95457,25 +46932,24 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { - static_assert_uimm_bits!(LANE, 4); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95484,24 +46958,22 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95510,25 +46982,23 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { - static_assert_uimm_bits!(LANE, 2); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95537,24 +47007,22 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95563,25 +47031,23 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { - static_assert_uimm_bits!(LANE, 3); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95590,24 +47056,22 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95616,25 +47080,24 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { - static_assert_uimm_bits!(LANE, 1); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95643,24 +47106,22 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95669,25 +47130,23 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { - static_assert_uimm_bits!(LANE, 2); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95696,24 +47155,22 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95722,25 +47179,23 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { - static_assert_uimm_bits!(LANE, 1); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95749,24 +47204,22 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95775,25 +47228,23 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { - static_assert_uimm_bits!(LANE, 3); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95802,24 +47253,22 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95828,25 +47277,24 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { - static_assert_uimm_bits!(LANE, 4); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { + let a: uint8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95855,24 +47303,22 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95881,25 +47327,23 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { - static_assert_uimm_bits!(LANE, 2); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95908,24 +47352,22 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95934,24 +47376,23 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { - static_assert_uimm_bits!(LANE, 3); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95960,23 +47401,22 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -95985,23 +47425,24 @@ pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(nop) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -96010,21 +47451,21 @@ pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96034,20 +47475,22 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { - vst1_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96057,20 +47500,21 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { - vst1_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96080,11 +47524,12 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { - vst1_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] @@ -96094,7 +47539,7 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96104,21 +47549,47 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { - vst1q_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96128,24 +47599,21 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { - let mut b: poly64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst1q_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96155,21 +47623,22 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { - vst1q_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96179,25 +47648,21 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { - let mut b: poly64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst1q_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96207,21 +47672,22 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { - vst1q_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -96231,1925 +47697,1623 @@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { - let mut b: poly64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst1q_s64_x4(transmute(a), transmute(b)) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" - )] - fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8); - } - _vst1_s8_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" - )] - fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8); - } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1_s8_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" - )] - fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8); - } - _vst1q_s8_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" - )] - fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8); - } - let mut b: int8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst1q_s8_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" - )] - fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16); - } - _vst1_s16_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" - )] - fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16); - } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst1_s16_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" - )] - fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16); - } - _vst1q_s16_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" - )] - fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16); - } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1q_s16_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" - )] - fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32); - } - _vst1_s32_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" - )] - fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32); - } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst1_s32_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" - )] - fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32); - } - _vst1q_s32_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" - )] - fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32); - } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst1q_s32_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v1i64.p0i64" - )] - fn _vst1_s64_x2(a: int64x1_t, b: int64x1_t, ptr: *mut i64); - } - _vst1_s64_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" - )] - fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64); - } - _vst1q_s64_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" - )] - fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64); - } - let mut b: int64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst1q_s64_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")] - fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); - } - _vst1_s8_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")] - fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); - } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1_s8_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")] - fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); - } - _vst1q_s8_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")] - fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); - } - let mut b: int8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst1q_s8_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")] - fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); - } - _vst1_s16_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")] - fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); - } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst1_s16_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")] - fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); - } - _vst1q_s16_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")] - fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); - } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1q_s16_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")] - fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); - } - _vst1_s32_x2(a, b.0, b.1) +pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")] - fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); - } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst1_s32_x2(a, b.0, b.1) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")] - fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); - } - _vst1q_s32_x2(a, b.0, b.1) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")] - fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); - } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst1q_s32_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v1i64.p0")] - fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t); - } - _vst1_s64_x2(a, b.0, b.1) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { + let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] - fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); - } - _vst1q_s64_x2(a, b.0, b.1) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] - fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); - } - let mut b: int64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst1q_s64_x2(a, b.0, b.1) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" - )] - fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); - } - _vst1_s8_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" - )] - fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1_s8_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" - )] - fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); - } - _vst1q_s8_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" - )] - fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); - } - let mut b: int8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst1q_s8_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly64x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" - )] - fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16); - } - _vst1_s16_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" - )] - fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16); - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst1_s16_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { + let ret_val: int8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" - )] - fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16); - } - _vst1q_s16_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" - )] - fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16); - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1q_s16_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { + let ret_val: int16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" - )] - fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32); - } - _vst1_s32_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { + let ret_val: int32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" - )] - fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32); - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst1_s32_x3(b.0, b.1, b.2, a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" - )] - fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32); - } - _vst1q_s32_x3(b.0, b.1, b.2, a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { + let ret_val: uint8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" - )] - fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32); - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst1q_s32_x3(b.0, b.1, b.2, a) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v1i64.p0i64" - )] - fn _vst1_s64_x3(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i64); - } - _vst1_s64_x3(b.0, b.1, b.2, a) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { + let ret_val: uint16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" - )] - fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64); - } - _vst1q_s64_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" - )] - fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64); - } - let mut b: int64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst1q_s64_x3(b.0, b.1, b.2, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { + let ret_val: uint32x2_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8.p0")] - fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); - } - _vst1_s8_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8.p0")] - fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1_s8_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { + let ret_val: poly8x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8.p0")] - fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); - } - _vst1q_s8_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8.p0")] - fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); - } - let mut b: int8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst1q_s8_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { + let ret_val: poly16x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16.p0")] - fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); - } - _vst1_s16_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16.p0")] - fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst1_s16_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16.p0")] - fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); - } - _vst1q_s16_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16.p0")] - fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1q_s16_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32.p0")] - fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); - } - _vst1_s32_x3(a, b.0, b.1, b.2) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { + transmute(a) +} +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32.p0")] - fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst1_s32_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32.p0")] - fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); - } - _vst1q_s32_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32.p0")] - fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst1q_s32_x3(a, b.0, b.1, b.2) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64.p0")] - fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); - } - _vst1_s64_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64.p0")] - fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); - } - _vst1q_s64_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64.p0")] - fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); - } - let mut b: int64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst1q_s64_x3(a, b.0, b.1, b.2) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" - )] - fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); - } - _vst1_s8_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" - )] - fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); - } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1_s8_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" - )] - fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); - } - _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" - )] - fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); - } - let mut b: int8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x4_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" - )] - fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16); - } - _vst1_s16_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" - )] - fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16); - } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst1_s16_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly8x16_t = transmute(a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" - )] - fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16); - } - _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { + transmute(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] +#[doc = "Vector reinterpret cast operation"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: poly16x8_t = transmute(a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" + link_name = "llvm.aarch64.neon.srhadd.v8i8" )] - fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] + fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) + _vrhadd_s8(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" + link_name = "llvm.aarch64.neon.srhadd.v16i8" )] - fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] + fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - _vst1_s32_x4(b.0, b.1, b.2, b.3, a) + _vrhaddq_s8(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" + link_name = "llvm.aarch64.neon.srhadd.v4i16" )] - fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] + fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst1_s32_x4(b.0, b.1, b.2, b.3, a) + _vrhadd_s16(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" + link_name = "llvm.aarch64.neon.srhadd.v8i16" )] - fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] + fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) + _vrhaddq_s16(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" + link_name = "llvm.aarch64.neon.srhadd.v2i32" )] - fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] + fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) + _vrhadd_s32(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v1i64.p0i64" + link_name = "llvm.aarch64.neon.srhadd.v4i32" )] - fn _vst1_s64_x4(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i64); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] + fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vst1_s64_x4(b.0, b.1, b.2, b.3, a) + _vrhaddq_s32(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" + link_name = "llvm.aarch64.neon.urhadd.v8i8" )] - fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] + fn _vrhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) + _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" + link_name = "llvm.aarch64.neon.urhadd.v16i8" )] - fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64); - } - let mut b: int64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8.p0")] - fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); - } - _vst1_s8_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8.p0")] - fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); - } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1_s8_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8.p0")] - fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); - } - _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8.p0")] - fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); - } - let mut b: int8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16.p0")] - fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); - } - _vst1_s16_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16.p0")] - fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] + fn _vrhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst1_s16_x4(a, b.0, b.1, b.2, b.3) + _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16.p0")] - fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] + fn _vrhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) + _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16.p0")] - fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] + fn _vrhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) + _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32.p0")] - fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] + fn _vrhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - _vst1_s32_x4(a, b.0, b.1, b.2, b.3) + _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(urhadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32.p0")] - fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] + fn _vrhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst1_s32_x4(a, b.0, b.1, b.2, b.3) + _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frintn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32.p0")] - fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frintn.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] + fn _vrndn_f32(a: float32x2_t) -> float32x2_t; } - _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) + _vrndn_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(frintn) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32.p0")] - fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frintn.v4f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] + fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) + _vrndnq_f32(a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64.p0")] - fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i8" + )] + fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vst1_s64_x4(a, b.0, b.1, b.2, b.3) + _vrshl_s8(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64.p0")] - fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v16i8" + )] + fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) + _vrshlq_s8(a, b) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(srshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64.p0")] - fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i16" + )] + fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - let mut b: int64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) + _vrshl_s16(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98159,21 +49323,28 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { - vst1_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i16" + )] + fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vrshlq_s16(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98183,24 +49354,28 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { - let mut b: uint8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i32" + )] + fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vrshl_s32(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98210,21 +49385,28 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { - vst1_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i32" + )] + fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vrshlq_s32(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98234,25 +49416,28 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { - let mut b: uint8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v1i64" + )] + fn _vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vrshl_s64(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98262,21 +49447,28 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { - vst1_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i64" + )] + fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vrshlq_s64(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98286,26 +49478,28 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { - let mut b: uint8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i8" + )] + fn _vrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vrshl_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98315,21 +49509,28 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { - vst1q_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v16i8" + )] + fn _vrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + _vrshlq_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98339,32 +49540,28 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { - let mut b: uint8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst1q_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i16" + )] + fn _vrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + _vrshl_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98374,21 +49571,28 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { - vst1q_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i16" + )] + fn _vrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + _vrshlq_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98398,37 +49602,28 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { - let mut b: uint8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst1q_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i32" + )] + fn _vrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + _vrshl_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98438,21 +49633,28 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { - vst1q_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i32" + )] + fn _vrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vrshlq_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98462,42 +49664,28 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { - let mut b: uint8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst1q_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v1i64" + )] + fn _vrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vrshl_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -98507,22 +49695,30 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { - vst1_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i64" + )] + fn _vrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vrshlq_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98531,25 +49727,23 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { - let mut b: uint16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst1_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + vrshl_s8(a, vdup_n_s8(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98558,22 +49752,23 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { - vst1_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + vrshlq_s8(a, vdupq_n_s8(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98582,26 +49777,23 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { - let mut b: uint16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst1_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + vrshl_s16(a, vdup_n_s16(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98610,22 +49802,23 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { - vst1_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + vrshlq_s16(a, vdupq_n_s16(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98634,27 +49827,23 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { - let mut b: uint16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst1_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + vrshl_s32(a, vdup_n_s32(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98663,22 +49852,23 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { - vst1q_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + vrshlq_s32(a, vdupq_n_s32(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98687,25 +49877,23 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { - let mut b: uint16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + vrshl_s64(a, vdup_n_s64(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98714,22 +49902,23 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { - vst1q_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + vrshlq_s64(a, vdupq_n_s64(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98738,26 +49927,23 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { - let mut b: uint16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + vrshl_u8(a, vdup_n_s8(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98766,22 +49952,23 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { - vst1q_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + vrshlq_u8(a, vdupq_n_s8(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98790,27 +49977,23 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { - let mut b: uint16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + vrshl_u16(a, vdup_n_s16(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98819,22 +50002,23 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { - vst1_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + vrshlq_u16(a, vdupq_n_s16(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98843,25 +50027,23 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { - let mut b: uint32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst1_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + vrshl_u32(a, vdup_n_s32(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98870,22 +50052,23 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { - vst1_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + vrshlq_u32(a, vdupq_n_s32(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98894,26 +50077,23 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { - let mut b: uint32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst1_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + vrshl_u64(a, vdup_n_s64(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98922,22 +50102,151 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { - vst1_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + vrshlq_u64(a, vdupq_n_s64(-N as _)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] + fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + _vrshrn_n_s16( + a, + const { + int16x8_t([ + -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, + -N as i16, + ]) + }, + ) +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] + fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + _vrshrn_n_s32( + a, + const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, + ) +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] + fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v8i8" + )] + fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + _vrshrn_n_s16(a, N) +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v4i16" + )] + fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + _vrshrn_n_s32(a, N) +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v2i32" + )] + fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + _vrshrn_n_s64(a, N) +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rshrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98946,27 +50255,23 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { - let mut b: uint32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst1_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + transmute(vrshrn_n_s16::(transmute(a))) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rshrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98975,22 +50280,23 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { - vst1q_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + transmute(vrshrn_n_s32::(transmute(a))) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rshrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -98999,24 +50305,21 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { - let mut b: uint32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst1q_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + transmute(vrshrn_n_s64::(transmute(a))) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(frsqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -99026,21 +50329,28 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { - vst1q_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v2f32" + )] + fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; + } + _vrsqrte_f32(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(frsqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -99050,25 +50360,28 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { - let mut b: uint32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst1q_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v4f32" + )] + fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; + } + _vrsqrteq_f32(a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -99078,21 +50391,28 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { - vst1q_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v2i32" + )] + fn _vrsqrte_u32(a: int32x2_t) -> int32x2_t; + } + _vrsqrte_u32(a.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -99102,25 +50422,28 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { - let mut b: uint32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst1q_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v4i32" + )] + fn _vrsqrteq_u32(a: int32x4_t) -> int32x4_t; + } + _vrsqrteq_u32(a.as_signed()).as_unsigned() } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(frsqrts) )] #[cfg_attr( not(target_arch = "arm"), @@ -99130,20 +50453,28 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { - vst1_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v2f32" + )] + fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + _vrsqrts_f32(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(frsqrts) )] #[cfg_attr( not(target_arch = "arm"), @@ -99153,21 +50484,30 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { - vst1_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v4f32" + )] + fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + _vrsqrtsq_f32(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99176,22 +50516,23 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { - vst1_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshr_n_s8::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99200,22 +50541,23 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { - vst1q_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshrq_n_s8::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99224,25 +50566,23 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { - let mut b: uint64x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst1q_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshr_n_s16::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99251,22 +50591,23 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { - vst1q_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshrq_n_s16::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99275,26 +50616,23 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { - let mut b: uint64x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst1q_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshr_n_s32::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99303,22 +50641,23 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { - vst1q_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshrq_n_s32::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99327,27 +50666,23 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { - let mut b: uint64x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst1q_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshr_n_s64::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99356,22 +50691,23 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { - vst1_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshrq_n_s64::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99380,25 +50716,23 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { - let mut b: poly8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshr_n_u8::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99407,22 +50741,23 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { - vst1_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vrshrq_n_u8::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99431,26 +50766,23 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { - let mut b: poly8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshr_n_u16::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99459,22 +50791,23 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { - vst1_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vrshrq_n_u16::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99483,27 +50816,23 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { - let mut b: poly8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshr_n_u32::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99512,22 +50841,23 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { - vst1q_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vrshrq_n_u32::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99536,33 +50866,23 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { - let mut b: poly8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst1q_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshr_n_u64::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99571,21 +50891,21 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { - vst1q_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vrshrq_n_u64::(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99595,37 +50915,28 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { - let mut b: poly8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst1q_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v8i8" + )] + fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + } + _vrsubhn_s16(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99635,21 +50946,28 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { - vst1q_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v4i16" + )] + fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + } + _vrsubhn_s32(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99659,42 +50977,29 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { - let mut b: poly8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst1q_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v2i32" + )] + fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + } + _vrsubhn_s64(a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99704,21 +51009,21 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { - vst1_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + transmute(vrsubhn_s16(transmute(a), transmute(b))) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99728,24 +51033,24 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { - let mut b: poly16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst1_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vrsubhn_s16(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99755,21 +51060,21 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { - vst1_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + transmute(vrsubhn_s32(transmute(a), transmute(b))) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99779,25 +51084,24 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { - let mut b: poly16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst1_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = transmute(vrsubhn_s32(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99807,21 +51111,21 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { - vst1_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + transmute(vrsubhn_s64(transmute(a), transmute(b))) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -99831,27 +51135,25 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { - let mut b: poly16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst1_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = transmute(vrsubhn_s64(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [1, 0]) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99860,22 +51162,23 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { - vst1q_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99884,25 +51187,23 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { - let mut b: poly16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99911,22 +51212,23 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { - vst1q_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99935,26 +51237,23 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { - let mut b: poly16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99963,22 +51262,23 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { - vst1q_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -99987,376 +51287,342 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { - let mut b: poly16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst1q_s16_x4(transmute(a), transmute(b)) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v1i64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v1i64.p0")] - fn _vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); - } - _vst1_v1i64(addr, val, align) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] - fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); - } - _vst1_v2f32(addr, val, align) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] - fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); - } - let val: float32x2_t = simd_shuffle!(val, val, [0, 1]); - _vst1_v2f32(addr, val, align) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2i32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] - fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); - } - _vst1_v2i32(addr, val, align) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2i32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] - fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); - } - let val: int32x2_t = simd_shuffle!(val, val, [0, 1]); - _vst1_v2i32(addr, val, align) +pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4i16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] - fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); - } - _vst1_v4i16(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4i16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] - fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); - } - let val: int16x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); - _vst1_v4i16(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v8i8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] - fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); - } - _vst1_v8i8(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v8i8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] - fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); - } - let val: int8x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1_v8i8(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v16i8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] - fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); - } - _vst1q_v16i8(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v16i8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] - fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); - } - let val: int8x16_t = simd_shuffle!( - val, - val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst1q_v16i8(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v2i64)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] - fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); - } - _vst1q_v2i64(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v2i64)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] - fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); - } - let val: int64x2_t = simd_shuffle!(val, val, [0, 1]); - _vst1q_v2i64(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] - fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); - } - _vst1q_v4f32(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] - fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); - } - let val: float32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); - _vst1q_v4f32(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4i32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] - fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); - } - _vst1q_v4i32(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4i32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] - fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); - } - let val: int32x4_t = simd_shuffle!(val, val, [0, 1, 2, 3]); - _vst1q_v4i32(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8i16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] - fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); - } - _vst1q_v8i16(addr, val, align) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8i16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] - fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); - } - let val: int16x8_t = simd_shuffle!(val, val, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst1q_v8i16(addr, val, align) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -100371,18 +51637,17 @@ unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -100397,1297 +51662,1294 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { - static_assert_uimm_bits!(LANE, 1); - let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { + static_assert!(LANE == 0); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" - )] - fn _vst2_f32(a: float32x2_t, b: float32x2_t, ptr: *mut i8); - } - _vst2_f32(b.0, b.1, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { + static_assert!(LANE == 0); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" - )] - fn _vst2_f32(a: float32x2_t, b: float32x2_t, ptr: *mut i8); - } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_f32(b.0, b.1, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { + static_assert!(LANE == 0); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" - )] - fn _vst2q_f32(a: float32x4_t, b: float32x4_t, ptr: *mut i8); - } - _vst2q_f32(b.0, b.1, a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(b, LANE as u32, a) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "SHA1 hash update accelerator, choose."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1c))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" + link_name = "llvm.aarch64.crypto.sha1c" )] - fn _vst2q_f32(a: float32x4_t, b: float32x4_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] + fn _vsha1cq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_f32(b.0, b.1, a as _) + _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "SHA1 fixed rotate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1h))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" + link_name = "llvm.aarch64.crypto.sha1h" )] - fn _vst2_s8(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")] + fn _vsha1h_u32(hash_e: i32) -> i32; } - _vst2_s8(b.0, b.1, a as _) + _vsha1h_u32(hash_e.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "SHA1 hash update accelerator, majority"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1m))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" + link_name = "llvm.aarch64.crypto.sha1m" )] - fn _vst2_s8(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] + fn _vsha1mq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2_s8(b.0, b.1, a as _) + _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "SHA1 hash update accelerator, parity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1p))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" + link_name = "llvm.aarch64.crypto.sha1p" )] - fn _vst2q_s8(a: int8x16_t, b: int8x16_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] + fn _vsha1pq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; } - _vst2q_s8(b.0, b.1, a as _) + _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" + link_name = "llvm.aarch64.crypto.sha1su0" )] - fn _vst2q_s8(a: int8x16_t, b: int8x16_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] + fn _vsha1su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t, w8_11: int32x4_t) -> int32x4_t; } - let mut b: int8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst2q_s8(b.0, b.1, a as _) + _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] +#[doc = "SHA1 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su1))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" + link_name = "llvm.aarch64.crypto.sha1su1" )] - fn _vst2_s16(a: int16x4_t, b: int16x4_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] + fn _vsha1su1q_u32(tw0_3: int32x4_t, w12_15: int32x4_t) -> int32x4_t; } - _vst2_s16(b.0, b.1, a as _) + _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] +#[doc = "SHA1 schedule update accelerator, upper part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h2))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256h2q_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, +) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" + link_name = "llvm.aarch64.crypto.sha256h2" )] - fn _vst2_s16(a: int16x4_t, b: int16x4_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] + fn _vsha256h2q_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2_s16(b.0, b.1, a as _) + _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256hq_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, +) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" + link_name = "llvm.aarch64.crypto.sha256h" )] - fn _vst2q_s16(a: int16x8_t, b: int16x8_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] + fn _vsha256hq_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; } - _vst2q_s16(b.0, b.1, a as _) + _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] +#[doc = "SHA256 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" + link_name = "llvm.aarch64.crypto.sha256su0" )] - fn _vst2q_s16(a: int16x8_t, b: int16x8_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] + fn _vsha256su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t) -> int32x4_t; } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2q_s16(b.0, b.1, a as _) + _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] +#[doc = "SHA256 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su1))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub unsafe fn vsha256su1q_u32( + tw0_3: uint32x4_t, + w8_11: uint32x4_t, + w12_15: uint32x4_t, +) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" + link_name = "llvm.aarch64.crypto.sha256su1" )] - fn _vst2_s32(a: int32x2_t, b: int32x2_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] + fn _vsha256su1q_u32(tw0_3: int32x4_t, w8_11: int32x4_t, w12_15: int32x4_t) -> int32x4_t; } - _vst2_s32(b.0, b.1, a as _) + _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" - )] - fn _vst2_s32(a: int32x2_t, b: int32x2_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] + fn _vshiftins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_s32(b.0, b.1, a as _) + _vshiftins_v16i8(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v1i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" - )] - fn _vst2q_s32(a: int32x4_t, b: int32x4_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] + fn _vshiftins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; } - _vst2q_s32(b.0, b.1, a as _) + _vshiftins_v1i64(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" - )] - fn _vst2q_s32(a: int32x4_t, b: int32x4_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] + fn _vshiftins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_s32(b.0, b.1, a as _) + _vshiftins_v2i32(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v2i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { +unsafe fn vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")] - fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] + fn _vshiftins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; } - _vst2_f32(a as _, b.0, b.1, 4) + _vshiftins_v2i64(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { +unsafe fn vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")] - fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] + fn _vshiftins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_f32(a as _, b.0, b.1, 4) + _vshiftins_v4i16(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v4i32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { +unsafe fn vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")] - fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] + fn _vshiftins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; } - _vst2q_f32(a as _, b.0, b.1, 4) + _vshiftins_v4i32(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { +unsafe fn vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")] - fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] + fn _vshiftins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_f32(a as _, b.0, b.1, 4) + _vshiftins_v8i16(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v8i8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { +unsafe fn vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")] - fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] + fn _vshiftins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } - _vst2_s8(a as _, b.0, b.1, 1) + _vshiftins_v8i8(a, b, c) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")] - fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); - } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2_s8(a as _, b.0, b.1, 1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdup_n_s8(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")] - fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); - } - _vst2q_s8(a as _, b.0, b.1, 1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdupq_n_s8(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")] - fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); - } - let mut b: int8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst2q_s8(a as _, b.0, b.1, 1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdup_n_s16(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] - fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); - } - _vst2_s16(a as _, b.0, b.1, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdupq_n_s16(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] - fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); - } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2_s16(a as _, b.0, b.1, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdup_n_s32(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] - fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); - } - _vst2q_s16(a as _, b.0, b.1, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdupq_n_s32(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] - fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); - } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2q_s16(a as _, b.0, b.1, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdup_n_s64(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] - fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); - } - _vst2_s32(a as _, b.0, b.1, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdupq_n_s64(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] - fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); - } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_s32(a as _, b.0, b.1, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdup_n_u8(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] - fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); - } - _vst2q_s32(a as _, b.0, b.1, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + simd_shl(a, vdupq_n_u8(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] - fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); - } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_s32(a as _, b.0, b.1, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdup_n_u16(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" - )] - fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); - } - _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + simd_shl(a, vdupq_n_u16(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" - )] - fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); - } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdup_n_u32(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" - )] - fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + simd_shl(a, vdupq_n_u32(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" - )] - fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); - } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdup_n_u64(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" - )] - fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); - } - _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + simd_shl(a, vdupq_n_u64(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" + link_name = "llvm.aarch64.neon.sshl.v8i8" )] - fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); + fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) + _vshl_s8(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.sshl.v16i8" )] - fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); + fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) + _vshlq_s8(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.sshl.v4i16" )] - fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); + fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) + _vshl_s16(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.sshl.v8i16" )] - fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); + fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) + _vshlq_s16(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.sshl.v2i32" )] - fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); + fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) + _vshl_s32(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.sshl.v4i32" )] - fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); + fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) + _vshlq_s32(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.sshl.v1i64" )] - fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); + fn _vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) + _vshl_s64(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sshl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.sshl.v2i64" )] - fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); + fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) + _vshlq_s64(a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.ushl.v8i8" )] - fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); - } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] - fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); - } - _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] - fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); - } - let mut b: float32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] - fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); - } - _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] - fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); - } - let mut b: float32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] - fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); - } - _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] - fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); - } - let mut b: int8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] - fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); - } - _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] - fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); - } - let mut b: int16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] - fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); - } - _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] - fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); + fn _vshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - let mut b: int16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) + _vshl_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] - fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v16i8" + )] + fn _vshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) + _vshlq_u8(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] - fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i16" + )] + fn _vshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - let mut b: int32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) + _vshl_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] - fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v8i16" + )] + fn _vshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) + _vshlq_u16(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] - fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i32" + )] + fn _vshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - let mut b: int32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) + _vshl_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101696,24 +52958,29 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i32" + )] + fn _vshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + _vshlq_u32(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101722,27 +52989,29 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v1i64" + )] + fn _vshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + _vshl_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101751,24 +53020,30 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i64" + )] + fn _vshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + } + _vshlq_u64(a.as_signed(), b).as_unsigned() } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshll, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101777,27 +53052,23 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst2_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 16); + simd_shl(simd_cast(a), vdupq_n_s32(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshll, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101806,24 +53077,23 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 32); + simd_shl(simd_cast(a), vdupq_n_s64(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshll, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101832,27 +53102,23 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { + static_assert!(N >= 0 && N <= 8); + simd_shl(simd_cast(a), vdupq_n_s16(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(ushll, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101861,24 +53127,23 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - vst2_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 16); + simd_shl(simd_cast(a), vdupq_n_u32(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(ushll, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101887,27 +53152,23 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst2_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 32); + simd_shl(simd_cast(a), vdupq_n_u64(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(ushll, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101916,24 +53177,23 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2q_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { + static_assert!(N >= 0 && N <= 8); + simd_shl(simd_cast(a), vdupq_n_u16(N as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101942,27 +53202,24 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst2q_lane_s32::(transmute(a), transmute(b)) +pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { 7 } else { N }; + simd_shr(a, vdup_n_s8(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101971,24 +53228,24 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { 7 } else { N }; + simd_shr(a, vdupq_n_s8(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -101997,27 +53254,24 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2_lane_s8::(transmute(a), transmute(b)) +pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { 15 } else { N }; + simd_shr(a, vdup_n_s16(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102026,24 +53280,24 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { 15 } else { N }; + simd_shr(a, vdupq_n_s16(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102052,27 +53306,24 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: poly16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst2_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { 31 } else { N }; + simd_shr(a, vdup_n_s32(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102081,24 +53332,24 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { 31 } else { N }; + simd_shr(a, vdupq_n_s32(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102107,25 +53358,24 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { 63 } else { N }; + simd_shr(a, vdup_n_s64(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102134,56 +53384,84 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { - vst2_s64(transmute(a), transmute(b)) +pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { 63 } else { N }; + simd_shr(a, vdupq_n_s64(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v1i64.p0")] - fn _vst2_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32); - } - _vst2_s64(a as _, b.0, b.1, 8) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { + return vdup_n_u8(0); + } else { + N + }; + simd_shr(a, vdup_n_u8(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v1i64.p0i8" - )] - fn _vst2_s64(a: int64x1_t, b: int64x1_t, ptr: *mut i8); - } - _vst2_s64(b.0, b.1, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ushr, N = 2) +)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { + return vdupq_n_u8(0); + } else { + N + }; + simd_shr(a, vdupq_n_u8(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ushr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102192,22 +53470,28 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { - vst2_s64(transmute(a), transmute(b)) +pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { + return vdup_n_u16(0); + } else { + N + }; + simd_shr(a, vdup_n_u16(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ushr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102216,22 +53500,28 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { - vst2_s8(transmute(a), transmute(b)) +pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { + return vdupq_n_u16(0); + } else { + N + }; + simd_shr(a, vdupq_n_u16(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ushr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102240,25 +53530,28 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { - let mut b: uint8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2_s8(transmute(a), transmute(b)) +pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { + return vdup_n_u32(0); + } else { + N + }; + simd_shr(a, vdup_n_u32(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ushr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102267,22 +53560,28 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { - vst2q_s8(transmute(a), transmute(b)) +pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { + return vdupq_n_u32(0); + } else { + N + }; + simd_shr(a, vdupq_n_u32(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ushr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102291,33 +53590,28 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { - let mut b: uint8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst2q_s8(transmute(a), transmute(b)) +pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { + return vdup_n_u64(0); + } else { + N + }; + simd_shr(a, vdup_n_u64(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ushr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102326,22 +53620,28 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { - vst2_s16(transmute(a), transmute(b)) +pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { + return vdupq_n_u64(0); + } else { + N + }; + simd_shr(a, vdupq_n_u64(n as _)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(shrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102350,25 +53650,23 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { - let mut b: uint16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst2_s16(transmute(a), transmute(b)) +pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_cast(simd_shr(a, vdupq_n_s16(N as _))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(shrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102377,22 +53675,23 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { - vst2q_s16(transmute(a), transmute(b)) +pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_cast(simd_shr(a, vdupq_n_s32(N as _))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(shrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102401,25 +53700,23 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { - let mut b: uint16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2q_s16(transmute(a), transmute(b)) +pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_cast(simd_shr(a, vdupq_n_s64(N as _))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(shrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102428,22 +53725,23 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { - vst2_s32(transmute(a), transmute(b)) +pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_cast(simd_shr(a, vdupq_n_u16(N as _))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(shrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102452,25 +53750,23 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { - let mut b: uint32x2x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - vst2_s32(transmute(a), transmute(b)) +pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_cast(simd_shr(a, vdupq_n_u32(N as _))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(shrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102479,73 +53775,351 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { - vst2q_s32(transmute(a), transmute(b)) +pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_cast(simd_shr(a, vdupq_n_u64(N as _))) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + vshiftins_v8i8(a, b, int8x8_t::splat(N as i8)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + vshiftins_v16i8(a, b, int8x16_t::splat(N as i8)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + vshiftins_v4i16(a, b, int16x4_t::splat(N as i16)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + vshiftins_v8i16(a, b, int16x8_t::splat(N as i16)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 0 && N <= 31); + vshiftins_v2i32(a, b, int32x2_t::splat(N)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 31); + vshiftins_v4i32(a, b, int32x4_t::splat(N)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 0 && N <= 63); + vshiftins_v1i64(a, b, int64x1_t::splat(N as i64)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 63); + vshiftins_v2i64(a, b, int64x2_t::splat(N as i64)) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(N as i8), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(N as i8), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(N as i16), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(N as i16), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 0 && N <= 31); + transmute(vshiftins_v2i32( + transmute(a), + transmute(b), + int32x2_t::splat(N as i32), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 31); + transmute(vshiftins_v4i32( + transmute(a), + transmute(b), + int32x4_t::splat(N as i32), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 0 && N <= 63); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(N as i64), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 63); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(N as i64), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(N as i8), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(N as i8), + )) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { - let mut b: uint32x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst2q_s32(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(N as i16), + )) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { - vst2_s8(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 4); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(N as i16), + )) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102554,25 +54128,23 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { - let mut b: poly8x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2_s8(transmute(a), transmute(b)) +pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshr_n_s8::(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102581,22 +54153,23 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { - vst2q_s8(transmute(a), transmute(b)) +pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshrq_n_s8::(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102605,33 +54178,23 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { - let mut b: poly8x16x2_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst2q_s8(transmute(a), transmute(b)) +pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshr_n_s16::(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102640,22 +54203,23 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { - vst2_s16(transmute(a), transmute(b)) +pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshrq_n_s16::(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102664,25 +54228,23 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { - let mut b: poly16x4x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - vst2_s16(transmute(a), transmute(b)) +pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshr_n_s32::(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102691,22 +54253,23 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { - vst2q_s16(transmute(a), transmute(b)) +pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshrq_n_s32::(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -102715,2000 +54278,1186 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { - let mut b: poly16x8x2_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - vst2q_s16(transmute(a), transmute(b)) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] - fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); - } - _vst3_f32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] - fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_f32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] - fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); - } - _vst3q_f32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] - fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_f32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] - fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); - } - _vst3_s8(a as _, b.0, b.1, b.2, 1) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] - fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3_s8(a as _, b.0, b.1, b.2, 1) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] - fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); - } - _vst3q_s8(a as _, b.0, b.1, b.2, 1) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] - fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); - } - let mut b: int8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst3q_s8(a as _, b.0, b.1, b.2, 1) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] - fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); - } - _vst3_s16(a as _, b.0, b.1, b.2, 2) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] - fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3_s16(a as _, b.0, b.1, b.2, 2) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] - fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); - } - _vst3q_s16(a as _, b.0, b.1, b.2, 2) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] - fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3q_s16(a as _, b.0, b.1, b.2, 2) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] - fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); - } - _vst3_s32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] - fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_s32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] - fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); - } - _vst3q_s32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] - fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_s32(a as _, b.0, b.1, b.2, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" - )] - fn _vst3_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8); - } - _vst3_f32(b.0, b.1, b.2, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" - )] - fn _vst3_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8); - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_f32(b.0, b.1, b.2, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" - )] - fn _vst3q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8); - } - _vst3q_f32(b.0, b.1, b.2, a as _) +pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshr_n_s64::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" - )] - fn _vst3q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8); - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_f32(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshrq_n_s64::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" - )] - fn _vst3_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); - } - _vst3_s8(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshr_n_u8::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" - )] - fn _vst3_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3_s8(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + simd_add(a, vshrq_n_u8::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" - )] - fn _vst3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); - } - _vst3q_s8(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshr_n_u16::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" - )] - fn _vst3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); - } - let mut b: int8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst3q_s8(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + simd_add(a, vshrq_n_u16::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" - )] - fn _vst3_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8); - } - _vst3_s16(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshr_n_u32::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" - )] - fn _vst3_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8); - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3_s16(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + simd_add(a, vshrq_n_u32::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" - )] - fn _vst3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8); - } - _vst3q_s16(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshr_n_u64::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" - )] - fn _vst3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8); - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3q_s16(b.0, b.1, b.2, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + simd_add(a, vshrq_n_u64::(b)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" - )] - fn _vst3_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8); - } - _vst3_s32(b.0, b.1, b.2, a as _) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(1 <= N && N <= 8); + vshiftins_v8i8(a, b, int8x8_t::splat(-N as i8)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" - )] - fn _vst3_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8); - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_s32(b.0, b.1, b.2, a as _) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(1 <= N && N <= 8); + vshiftins_v16i8(a, b, int8x16_t::splat(-N as i8)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" - )] - fn _vst3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8); - } - _vst3q_s32(b.0, b.1, b.2, a as _) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(1 <= N && N <= 16); + vshiftins_v4i16(a, b, int16x4_t::splat(-N as i16)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" - )] - fn _vst3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8); - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_s32(b.0, b.1, b.2, a as _) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(1 <= N && N <= 16); + vshiftins_v8i16(a, b, int16x8_t::splat(-N as i16)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] - fn _vst3_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ); - } - _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(1 <= N && N <= 32); + vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] - fn _vst3_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ); - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(1 <= N && N <= 32); + vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(1 <= N && N <= 64); + vshiftins_v1i64(a, b, int64x1_t::splat(-N as i64)) +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] - fn _vst3q_lane_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ); - } - _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(1 <= N && N <= 64); + vshiftins_v2i64(a, b, int64x2_t::splat(-N as i64)) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] - fn _vst3q_lane_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ); - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(-N as i8), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] - fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); - } - _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(-N as i8), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] - fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(-N as i16), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] - fn _vst3_lane_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ); - } - _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(-N as i16), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] - fn _vst3_lane_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ); - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(1 <= N && N <= 32); + transmute(vshiftins_v2i32( + transmute(a), + transmute(b), + int32x2_t::splat(-N), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] - fn _vst3q_lane_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ); - } - _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(1 <= N && N <= 32); + transmute(vshiftins_v4i32( + transmute(a), + transmute(b), + int32x4_t::splat(-N), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] - fn _vst3q_lane_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ); - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v1i64( + transmute(a), + transmute(b), + int64x1_t::splat(-N as i64), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] - fn _vst3_lane_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, - ); - } - _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(1 <= N && N <= 64); + transmute(vshiftins_v2i64( + transmute(a), + transmute(b), + int64x2_t::splat(-N as i64), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] - fn _vst3_lane_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, - ); - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v8i8( + transmute(a), + transmute(b), + int8x8_t::splat(-N as i8), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] - fn _vst3q_lane_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ); - } - _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert!(1 <= N && N <= 8); + transmute(vshiftins_v16i8( + transmute(a), + transmute(b), + int8x16_t::splat(-N as i8), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] - fn _vst3q_lane_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ); - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v4i16( + transmute(a), + transmute(b), + int16x4_t::splat(-N as i16), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" - )] - fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8); - } - _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert!(1 <= N && N <= 16); + transmute(vshiftins_v8i16( + transmute(a), + transmute(b), + int16x8_t::splat(-N as i16), + )) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" - )] - fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8); - } - let mut b: float32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { + vst1_v2f32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" - )] - fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { + vst1q_v4f32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" - )] - fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8); - } - let mut b: float32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { + vst1_v8i8(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" - )] - fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8); - } - _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { + vst1q_v16i8(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" - )] - fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8); - } - let mut b: int8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { + vst1_v4i16(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" - )] - fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8); - } - _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { + vst1q_v8i16(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" - )] - fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8); - } - let mut b: int16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { + vst1_v2i32(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" - )] - fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { + vst1q_v4i32(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" - )] - fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); - } - let mut b: int16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { + vst1_v1i64(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" - )] - fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); - } - _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { + vst1q_v2i64(ptr as *const i8, a, crate::mem::align_of::() as i32) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" - )] - fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); - } - let mut b: int32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { + vst1_v8i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" - )] - fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { + vst1q_v16i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" - )] - fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); - } - let mut b: int32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { + vst1_v4i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3_lane_s8::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { + vst1q_v8i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3_lane_s8::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { + vst1_v2i32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3_lane_s16::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { + vst1q_v4i32( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst3_lane_s16::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { + vst1_v1i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3q_lane_s16::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { + vst1q_v2i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3q_lane_s16::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { + vst1_v8i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - vst3_lane_s32::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { + vst1q_v16i8( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst3_lane_s32::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { + vst1_v4i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3q_lane_s32::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { + vst1q_v8i16( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst3q_lane_s32::(transmute(a), transmute(b)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { + vst1_v1i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { + vst1q_v2i64( + ptr as *const i8, + transmute(a), + crate::mem::align_of::() as i32, + ) +} +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3_lane_s8::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] + fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); + } + _vst1_f32_x2(a, b.0, b.1) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3_lane_s8::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] + fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); + } + _vst1q_f32_x2(a, b.0, b.1) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3_lane_s16::(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32" + )] + fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32); + } + _vst1_f32_x2(b.0, b.1, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: poly16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst3_lane_s16::(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32" + )] + fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32); + } + _vst1q_f32_x2(b.0, b.1, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3q_lane_s16::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v2f32.p0")] + fn _vst1_f32_x3(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t); + } + _vst1_f32_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3q_lane_s16::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.v4f32.p0")] + fn _vst1q_f32_x3(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t); + } + _vst1q_f32_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { - vst3_s64(transmute(a), transmute(b)) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32" + )] + fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32); + } + _vst1_f32_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { +pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v1i64.p0i8" + link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32" )] - fn _vst3_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i8); + fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32); } - _vst3_s64(b.0, b.1, b.2, a as _) + _vst1q_f32_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v1i64")] - fn _vst3_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32.p0")] + fn _vst1_f32_x4( + ptr: *mut f32, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + ); } - _vst3_s64(a as _, b.0, b.1, b.2, 8) + _vst1_f32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { - vst3_s64(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32.p0")] + fn _vst1q_f32_x4( + ptr: *mut f32, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + ); + } + _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { - vst3_s8(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32" + )] + fn _vst1_f32_x4( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + ptr: *mut f32, + ); + } + _vst1_f32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { - let mut b: uint8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3_s8(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32" + )] + fn _vst1q_f32_x4( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + ptr: *mut f32, + ); + } + _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104717,22 +55466,23 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { - vst3q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104741,38 +55491,23 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { - let mut b: uint8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst3q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104781,22 +55516,23 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { - vst3_s16(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104805,26 +55541,23 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { - let mut b: uint16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst3_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { + static_assert_uimm_bits!(LANE, 4); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104833,22 +55566,23 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { - vst3q_s16(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104857,26 +55591,23 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { - let mut b: uint16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3q_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104885,22 +55616,23 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { - vst3_s32(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104909,26 +55641,23 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { - let mut b: uint32x2x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - vst3_s32(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104937,22 +55666,23 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { - vst3q_s32(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104961,26 +55691,23 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { - let mut b: uint32x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst3q_s32(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -104989,22 +55716,23 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { - vst3_s8(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { + static_assert_uimm_bits!(LANE, 4); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -105013,26 +55741,23 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { - let mut b: poly8x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -105041,22 +55766,23 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { - vst3q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -105065,38 +55791,23 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { - let mut b: poly8x16x3_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst3q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -105105,22 +55816,23 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { - vst3_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -105129,26 +55841,23 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { - let mut b: poly16x4x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - vst3_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -105157,22 +55866,23 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { - vst3q_s16(transmute(a), transmute(b)) +pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -105181,2011 +55891,1125 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { - let mut b: poly16x8x3_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - vst3q_s16(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] - fn _vst4_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - size: i32, - ); - } - _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] - fn _vst4_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - size: i32, - ); - } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] - fn _vst4q_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - size: i32, - ); - } - _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] - fn _vst4q_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - size: i32, - ); - } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] - fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); - } - _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] - fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); - } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] - fn _vst4q_s8( - ptr: *mut i8, - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - size: i32, - ); - } - _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] - fn _vst4q_s8( - ptr: *mut i8, - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - size: i32, - ); - } - let mut b: int8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] - fn _vst4_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - size: i32, - ); - } - _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] - fn _vst4_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - size: i32, - ); - } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] - fn _vst4q_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - size: i32, - ); - } - _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] - fn _vst4q_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - size: i32, - ); - } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] - fn _vst4_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - size: i32, - ); - } - _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] - fn _vst4_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - size: i32, - ); - } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) +pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { + static_assert_uimm_bits!(LANE, 4); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] - fn _vst4q_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - size: i32, - ); - } - _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] - fn _vst4q_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - size: i32, - ); - } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" - )] - fn _vst4_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut i8); - } - _vst4_f32(b.0, b.1, b.2, b.3, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" - )] - fn _vst4_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut i8); - } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_f32(b.0, b.1, b.2, b.3, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" - )] - fn _vst4q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut i8); - } - _vst4q_f32(b.0, b.1, b.2, b.3, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { + static_assert!(LANE == 0); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" - )] - fn _vst4q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut i8); - } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_f32(b.0, b.1, b.2, b.3, a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { + vst1_s64_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" - )] - fn _vst4_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); - } - _vst4_s8(b.0, b.1, b.2, b.3, a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { + vst1_s64_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" - )] - fn _vst4_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); - } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4_s8(b.0, b.1, b.2, b.3, a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { + vst1_s64_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" - )] - fn _vst4q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); - } - _vst4q_s8(b.0, b.1, b.2, b.3, a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { + vst1q_s64_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" - )] - fn _vst4q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); - } - let mut b: int8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - _vst4q_s8(b.0, b.1, b.2, b.3, a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { + vst1q_s64_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { + vst1q_s64_x4(transmute(a), transmute(b)) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8" )] - fn _vst4_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i8); + fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8); } - _vst4_s16(b.0, b.1, b.2, b.3, a as _) + _vst1_s8_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8" )] - fn _vst4_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i8); + fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8); } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4_s16(b.0, b.1, b.2, b.3, a as _) + _vst1q_s8_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16" )] - fn _vst4q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i8); + fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16); } - _vst4q_s16(b.0, b.1, b.2, b.3, a as _) + _vst1_s16_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16" )] - fn _vst4q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i8); + fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16); } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4q_s16(b.0, b.1, b.2, b.3, a as _) + _vst1q_s16_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32" )] - fn _vst4_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i8); + fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32); } - _vst4_s32(b.0, b.1, b.2, b.3, a as _) + _vst1_s32_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32" )] - fn _vst4_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i8); + fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32); } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_s32(b.0, b.1, b.2, b.3, a as _) + _vst1q_s32_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v1i64.p0i64" )] - fn _vst4q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i8); + fn _vst1_s64_x2(a: int64x1_t, b: int64x1_t, ptr: *mut i64); } - _vst4q_s32(b.0, b.1, b.2, b.3, a as _) + _vst1_s64_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" + link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64" )] - fn _vst4q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i8); - } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_s32(b.0, b.1, b.2, b.3, a as _) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] - fn _vst4_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ); + fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64); } - _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1q_s64_x2(b.0, b.1, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] - fn _vst4_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")] + fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t); } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1_s8_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] - fn _vst4q_lane_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")] + fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t); } - _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1q_s8_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] - fn _vst4q_lane_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")] + fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t); } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1_s16_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] - fn _vst4_lane_s8( - ptr: *mut i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")] + fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t); } - _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) + _vst1q_s16_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] - fn _vst4_lane_s8( - ptr: *mut i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")] + fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t); } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) + _vst1_s32_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] - fn _vst4_lane_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")] + fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t); } - _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) + _vst1q_s32_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] - fn _vst4_lane_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v1i64.p0")] + fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t); } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) + _vst1_s64_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] - fn _vst4q_lane_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] + fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); } - _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) + _vst1q_s64_x2(a, b.0, b.1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] - fn _vst4q_lane_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8" + )] + fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) + _vst1_s8_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] - fn _vst4_lane_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8" + )] + fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); } - _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1q_s8_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] - fn _vst4_lane_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16" + )] + fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16); } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1_s16_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] - fn _vst4q_lane_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16" + )] + fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16); } - _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1q_s16_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] - fn _vst4q_lane_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32" + )] + fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32); } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + _vst1_s32_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32" )] - fn _vst4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32); } - _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1q_s32_x3(b.0, b.1, b.2, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" + link_name = "llvm.aarch64.neon.st1x3.v1i64.p0i64" )] - fn _vst4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1_s64_x3(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i64); + } + _vst1_s64_x3(b.0, b.1, b.2, a) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64" + )] + fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64); + } + _vst1q_s64_x3(b.0, b.1, b.2, a) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8.p0")] + fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); + } + _vst1_s8_x3(a, b.0, b.1, b.2) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8.p0")] + fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); + } + _vst1q_s8_x3(a, b.0, b.1, b.2) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16.p0")] + fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); + } + _vst1_s16_x3(a, b.0, b.1, b.2) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16.p0")] + fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); } - let mut b: float32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1q_s16_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" - )] - fn _vst4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *mut i8, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32.p0")] + fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); } - _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1_s32_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" - )] - fn _vst4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *mut i8, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32.p0")] + fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); } - let mut b: float32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1q_s32_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" - )] - fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64.p0")] + fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); } - _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1_s64_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" - )] - fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64.p0")] + fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); } - let mut b: int8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1q_s64_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8" )] - fn _vst4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); } - _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8" )] - fn _vst4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); } - let mut b: int16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16" )] - fn _vst4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16); } - _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16" )] - fn _vst4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16); } - let mut b: int16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32" )] - fn _vst4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32); } - _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32" )] - fn _vst4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32); } - let mut b: int32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v1i64.p0i64" )] - fn _vst4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1_s64_x4(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i64); } - _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + _vst1_s64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" + link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64" )] - fn _vst4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *mut i8, - ); + fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64); } - let mut b: int32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4_lane_s8::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4_lane_s8::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - vst4_lane_s16::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst4_lane_s16::(transmute(a), transmute(b)) + _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4q_lane_s16::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8.p0")] + fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); + } + _vst1_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: uint16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4q_lane_s16::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8.p0")] + fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); + } + _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - vst4_lane_s32::(transmute(a), transmute(b)) +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16.p0")] + fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); + } + _vst1_s16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - let mut b: uint32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst4_lane_s32::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16.p0")] + fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); + } + _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - vst4q_lane_s32::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32.p0")] + fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); + } + _vst1_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: uint32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst4q_lane_s32::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32.p0")] + fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); + } + _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4_lane_s8::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64.p0")] + fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); + } + _vst1_s64_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4_lane_s8::(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64.p0")] + fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); + } + _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -107194,24 +57018,21 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - vst4_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -107220,29 +57041,21 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - let mut b: poly16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst4_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -107251,24 +57064,21 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -107277,26 +57087,20 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - let mut b: poly16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4q_lane_s16::(transmute(a), transmute(b)) +pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107306,62 +57110,20 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { - vst4_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v1i64")] - fn _vst4_s64( - ptr: *mut i8, - a: int64x1_t, - b: int64x1_t, - c: int64x1_t, - d: int64x1_t, - size: i32, - ); - } - _vst4_s64(a as _, b.0, b.1, b.2, b.3, 8) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v1i64.p0i8" - )] - fn _vst4_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i8); - } - _vst4_s64(b.0, b.1, b.2, b.3, a as _) +pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107371,21 +57133,20 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { - vst4_s64(transmute(a), transmute(b)) +pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107395,21 +57156,20 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { - vst4_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107419,26 +57179,20 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { - let mut b: uint8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107448,21 +57202,20 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { - vst4q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107472,42 +57225,20 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { - let mut b: uint8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst4q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107517,21 +57248,20 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { - vst4_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107541,26 +57271,20 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { - let mut b: uint16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst4_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { + vst1q_s16_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107570,21 +57294,20 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { - vst4q_s16(transmute(a), transmute(b)) +pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { + vst1_s32_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107594,26 +57317,20 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { - let mut b: uint16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4q_s16(transmute(a), transmute(b)) +pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { + vst1_s32_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107623,21 +57340,20 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { - vst4_s32(transmute(a), transmute(b)) +pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { + vst1_s32_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107647,26 +57363,20 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { - let mut b: uint32x2x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1]); - vst4_s32(transmute(a), transmute(b)) +pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { + vst1q_s32_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107676,21 +57386,20 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { - vst4q_s32(transmute(a), transmute(b)) +pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { + vst1q_s32_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107700,26 +57409,20 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { - let mut b: uint32x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst4q_s32(transmute(a), transmute(b)) +pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { + vst1q_s32_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107729,21 +57432,20 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { - vst4_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { + vst1_s64_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107753,26 +57455,20 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { - let mut b: poly8x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { + vst1_s64_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107782,21 +57478,20 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { - vst4q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { + vst1_s64_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107806,42 +57501,20 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { - let mut b: poly8x16x4_t = b; - b.0 = simd_shuffle!( - b.0, - b.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.1 = simd_shuffle!( - b.1, - b.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.2 = simd_shuffle!( - b.2, - b.2, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - b.3 = simd_shuffle!( - b.3, - b.3, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - vst4q_s8(transmute(a), transmute(b)) +pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { + vst1q_s64_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107851,21 +57524,20 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { - vst4_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { + vst1q_s64_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107875,26 +57547,20 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { - let mut b: poly16x4x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3]); - vst4_s16(transmute(a), transmute(b)) +pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { + vst1q_s64_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107904,21 +57570,20 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { - vst4q_s16(transmute(a), transmute(b)) +pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107928,26 +57593,20 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { - let mut b: poly16x8x4_t = b; - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - b.3 = simd_shuffle!(b.3, b.3, [0, 1, 2, 3, 4, 5, 6, 7]); - vst4q_s16(transmute(a), transmute(b)) +pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107957,21 +57616,20 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - simd_sub(a, b) +pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -107981,24 +57639,20 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: float32x2_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108008,21 +57662,20 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - simd_sub(a, b) +pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108032,24 +57685,20 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: float32x4_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108059,21 +57708,20 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - simd_sub(a, b) +pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108083,24 +57731,20 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int16x4_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108110,21 +57754,20 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - simd_sub(a, b) +pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108134,24 +57777,20 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108161,21 +57800,20 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - simd_sub(a, b) +pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(st1) )] #[cfg_attr( not(target_arch = "arm"), @@ -108185,25 +57823,192 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint16x4_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { + vst1q_s16_x4(transmute(a), transmute(b)) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v1i64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v1i64.p0")] + fn _vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); + } + _vst1_v1i64(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] + fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); + } + _vst1_v2f32(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v2i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] + fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); + } + _vst1_v2i32(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] + fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); + } + _vst1_v4i16(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v8i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] + fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); + } + _vst1_v8i8(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v16i8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] + fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); + } + _vst1q_v16i8(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v2i64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] + fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); + } + _vst1q_v2i64(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] + fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); + } + _vst1q_v4f32(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v4i32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] + fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); + } + _vst1q_v4i32(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8i16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] + fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); + } + _vst1q_v8i16(addr, val, align) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108212,582 +58017,576 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - simd_sub(a, b) +pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2f32.p0i8" + )] + fn _vst2_f32(a: float32x2_t, b: float32x2_t, ptr: *mut i8); + } + _vst2_f32(b.0, b.1, a as _) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v4f32.p0i8" + )] + fn _vst2q_f32(a: float32x4_t, b: float32x4_t, ptr: *mut i8); + } + _vst2q_f32(b.0, b.1, a as _) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v8i8.p0i8" + )] + fn _vst2_s8(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + } + _vst2_s8(b.0, b.1, a as _) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v16i8.p0i8" + )] + fn _vst2q_s8(a: int8x16_t, b: int8x16_t, ptr: *mut i8); + } + _vst2q_s8(b.0, b.1, a as _) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v4i16.p0i8" + )] + fn _vst2_s16(a: int16x4_t, b: int16x4_t, ptr: *mut i8); + } + _vst2_s16(b.0, b.1, a as _) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v8i16.p0i8" + )] + fn _vst2q_s16(a: int16x8_t, b: int16x8_t, ptr: *mut i8); + } + _vst2q_s16(b.0, b.1, a as _) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v2i32.p0i8" + )] + fn _vst2_s32(a: int32x2_t, b: int32x2_t, ptr: *mut i8); + } + _vst2_s32(b.0, b.1, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st2))] +pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v4i32.p0i8" + )] + fn _vst2q_s32(a: int32x4_t, b: int32x4_t, ptr: *mut i8); + } + _vst2q_s32(b.0, b.1, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")] + fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32); + } + _vst2_f32(a as _, b.0, b.1, 4) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int32x2_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")] + fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32); + } + _vst2q_f32(a as _, b.0, b.1, 4) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")] + fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32); + } + _vst2_s8(a as _, b.0, b.1, 1) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")] + fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32); + } + _vst2q_s8(a as _, b.0, b.1, 1) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] + fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); + } + _vst2_s16(a as _, b.0, b.1, 2) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint32x2_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] + fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); + } + _vst2q_s16(a as _, b.0, b.1, 2) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] + fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); + } + _vst2_s32(a as _, b.0, b.1, 4) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] + fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); + } + _vst2q_s32(a as _, b.0, b.1, 4) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8" + )] + fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); + } + _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - simd_sub(a, b) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8" + )] + fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - simd_sub(a, b) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8" + )] + fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); + } + _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8" + )] + fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); + } + _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - simd_sub(a, b) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8" + )] + fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - simd_sub(a, b) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8" + )] + fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); + } + _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8" + )] + fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); + } + _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] + fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); + } + _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] + fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); + } + _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] + fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); + } + _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int8x16_t = simd_sub(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] + fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); + } + _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] + fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); + } + _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint8x8_t = simd_sub(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] + fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); + } + _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - simd_sub(a, b) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] + fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); + } + _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108796,29 +58595,23 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: uint8x16_t = simd_sub(a, b); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108827,23 +58620,23 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let d: int8x8_t = vsubhn_s16(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108852,32 +58645,23 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int8x8_t = vsubhn_s16(b, c); - let ret_val: int8x16_t = - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108886,23 +58670,23 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let d: int16x4_t = vsubhn_s32(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + vst2_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108911,27 +58695,23 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let d: int16x4_t = vsubhn_s32(b, c); - let ret_val: int16x8_t = simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108940,23 +58720,23 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let d: int32x2_t = vsubhn_s64(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3]) +pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108965,27 +58745,23 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_shuffle!(c, c, [0, 1]); - let d: int32x2_t = vsubhn_s64(b, c); - let ret_val: int32x4_t = simd_shuffle!(a, d, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -108994,22 +58770,21 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let d: uint8x8_t = vsubhn_u16(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -109019,85 +58794,55 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: uint8x8_t = vsubhn_u16(b, c); - let ret_val: uint8x16_t = - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { + vst2_s64(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let d: uint16x4_t = vsubhn_u32(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v1i64.p0")] + fn _vst2_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32); + } + _vst2_s64(a as _, b.0, b.1, 8) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_shuffle!(c, c, [0, 1, 2, 3]); - let d: uint16x4_t = vsubhn_u32(b, c); - let ret_val: uint16x8_t = simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2.v1i64.p0i8" + )] + fn _vst2_s64(a: int64x1_t, b: int64x1_t, ptr: *mut i8); + } + _vst2_s64(b.0, b.1, a as _) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -109107,22 +58852,20 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let d: uint32x2_t = vsubhn_u64(b, c); - simd_shuffle!(a, d, [0, 1, 2, 3]) +pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { + vst2_s64(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn2) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109132,26 +58875,20 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_shuffle!(c, c, [0, 1]); - let d: uint32x2_t = vsubhn_u64(b, c); - let ret_val: uint32x4_t = simd_shuffle!(a, d, [0, 1, 2, 3]); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109161,22 +58898,20 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109186,25 +58921,20 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - let ret_val: int8x8_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109214,22 +58944,20 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - let c: i32x4 = i32x4::new(16, 16, 16, 16); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { + vst2q_s16(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109239,25 +58967,20 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: i32x4 = i32x4::new(16, 16, 16, 16); - let ret_val: int16x4_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { + vst2_s32(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109267,22 +58990,20 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - let c: i64x2 = i64x2::new(32, 32); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { + vst2q_s32(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109292,25 +59013,20 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: i64x2 = i64x2::new(32, 32); - let ret_val: int32x2_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109320,22 +59036,20 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109345,25 +59059,20 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - let ret_val: uint8x8_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -109373,240 +59082,617 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let c: u32x4 = u32x4::new(16, 16, 16, 16); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { + vst2q_s16(transmute(a), transmute(b)) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")] + fn _vst3_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32); + } + _vst3_f32(a as _, b.0, b.1, b.2, 4) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")] + fn _vst3q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32); + } + _vst3q_f32(a as _, b.0, b.1, b.2, 4) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")] + fn _vst3_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32); + } + _vst3_s8(a as _, b.0, b.1, b.2, 1) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")] + fn _vst3q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32); + } + _vst3q_s8(a as _, b.0, b.1, b.2, 1) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")] + fn _vst3_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32); + } + _vst3_s16(a as _, b.0, b.1, b.2, 2) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")] + fn _vst3q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32); + } + _vst3q_s16(a as _, b.0, b.1, b.2, 2) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")] + fn _vst3_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32); + } + _vst3_s32(a as _, b.0, b.1, b.2, 4) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")] + fn _vst3q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32); + } + _vst3q_s32(a as _, b.0, b.1, b.2, 4) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2f32.p0i8" + )] + fn _vst3_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8); + } + _vst3_f32(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4f32.p0i8" + )] + fn _vst3q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8); + } + _vst3q_f32(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v8i8.p0i8" + )] + fn _vst3_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + } + _vst3_s8(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v16i8.p0i8" + )] + fn _vst3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + } + _vst3q_s8(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4i16.p0i8" + )] + fn _vst3_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8); + } + _vst3_s16(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v8i16.p0i8" + )] + fn _vst3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8); + } + _vst3q_s16(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v2i32.p0i8" + )] + fn _vst3_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8); + } + _vst3_s32(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4i32.p0i8" + )] + fn _vst3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8); + } + _vst3q_s32(b.0, b.1, b.2, a as _) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] + fn _vst3_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ); + } + _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] + fn _vst3q_lane_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ); + } + _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] + fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); + } + _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] + fn _vst3_lane_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ); + } + _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] + fn _vst3q_lane_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ); + } + _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] + fn _vst3_lane_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ); + } + _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: u32x4 = u32x4::new(16, 16, 16, 16); - let ret_val: uint16x4_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] + fn _vst3q_lane_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ); + } + _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let c: u64x2 = u64x2::new(32, 32); - simd_cast(simd_shr(simd_sub(a, b), transmute(c))) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8" + )] + fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8); + } + _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); - let c: u64x2 = u64x2::new(32, 32); - let ret_val: uint32x2_t = simd_cast(simd_shr(simd_sub(a, b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8" + )] + fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - let c: int16x8_t = simd_cast(a); - let d: int16x8_t = simd_cast(b); - simd_sub(c, d) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8" + )] + fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8); + } + _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_cast(a); - let d: int16x8_t = simd_cast(b); - let ret_val: int16x8_t = simd_sub(c, d); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8" + )] + fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8); + } + _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - let c: int32x4_t = simd_cast(a); - let d: int32x4_t = simd_cast(b); - simd_sub(c, d) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8" + )] + fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_cast(a); - let d: int32x4_t = simd_cast(b); - let ret_val: int32x4_t = simd_sub(c, d); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8" + )] + fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); + } + _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - let c: int64x2_t = simd_cast(a); - let d: int64x2_t = simd_cast(b); - simd_sub(c, d) +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8" + )] + fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109615,27 +59701,23 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int64x2_t = simd_cast(a); - let d: int64x2_t = simd_cast(b); - let ret_val: int64x2_t = simd_sub(c, d); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109644,24 +59726,23 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - let c: uint16x8_t = simd_cast(a); - let d: uint16x8_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + vst3_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109670,27 +59751,23 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_cast(a); - let d: uint16x8_t = simd_cast(b); - let ret_val: uint16x8_t = simd_sub(c, d); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109699,24 +59776,23 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - let c: uint32x4_t = simd_cast(a); - let d: uint32x4_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + vst3_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109725,27 +59801,23 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_cast(a); - let d: uint32x4_t = simd_cast(b); - let ret_val: uint32x4_t = simd_sub(c, d); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + vst3q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109754,24 +59826,23 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - let c: uint64x2_t = simd_cast(a); - let d: uint64x2_t = simd_cast(b); - simd_sub(c, d) +pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109780,27 +59851,23 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint64x2_t = simd_cast(a); - let d: uint64x2_t = simd_cast(b); - let ret_val: uint64x2_t = simd_sub(c, d); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + vst3_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(st3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -109809,21 +59876,21 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -109833,75 +59900,55 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int16x8_t = simd_sub(a, simd_cast(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { + vst3_s64(transmute(a), transmute(b)) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { - simd_sub(a, simd_cast(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v1i64.p0i8" + )] + fn _vst3_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i8); + } + _vst3_s64(b.0, b.1, b.2, a as _) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: int32x4_t = simd_sub(a, simd_cast(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v1i64")] + fn _vst3_s64(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, size: i32); + } + _vst3_s64(a as _, b.0, b.1, b.2, 8) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -109911,21 +59958,20 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { + vst3_s64(transmute(a), transmute(b)) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(st3) )] #[cfg_attr( not(target_arch = "arm"), @@ -109935,24 +59981,20 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { - let a: int64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: int64x2_t = simd_sub(a, simd_cast(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(st3) )] #[cfg_attr( not(target_arch = "arm"), @@ -109962,21 +60004,20 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { + vst3q_s8(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(st3) )] #[cfg_attr( not(target_arch = "arm"), @@ -109986,24 +60027,20 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: uint16x8_t = simd_sub(a, simd_cast(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { + vst3_s16(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(st3) )] #[cfg_attr( not(target_arch = "arm"), @@ -110013,21 +60050,20 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { + vst3q_s16(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(st3) )] #[cfg_attr( not(target_arch = "arm"), @@ -110037,24 +60073,20 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let ret_val: uint32x4_t = simd_sub(a, simd_cast(b)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { + vst3_s32(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(st3) )] #[cfg_attr( not(target_arch = "arm"), @@ -110064,21 +60096,20 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { - simd_sub(a, simd_cast(b)) +pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { + vst3q_s32(transmute(a), transmute(b)) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(st3) )] #[cfg_attr( not(target_arch = "arm"), @@ -110088,1424 +60119,1261 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { - let a: uint64x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let ret_val: uint64x2_t = simd_sub(a, simd_cast(b)); - simd_shuffle!(ret_val, ret_val, [0, 1]) -} -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sudot, LANE = 0) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vsudot_lane_s32( - a: int32x2_t, - b: int8x8_t, - c: uint8x8_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, transmute(c), b) +pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sudot, LANE = 0) + assert_instr(st3) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsudot_lane_s32( - a: int32x2_t, - b: int8x8_t, - c: uint8x8_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint32x2_t = transmute(c); - let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vusdot_s32(a, transmute(c), b); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { + vst3q_s8(transmute(a), transmute(b)) } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sudot, LANE = 0) + assert_instr(st3) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsudotq_lane_s32( - a: int32x4_t, - b: int8x16_t, - c: uint8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: uint32x2_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, transmute(c), b) +pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { + vst3_s16(transmute(a), transmute(b)) } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sudot, LANE = 0) + assert_instr(st3) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vsudotq_lane_s32( - a: int32x4_t, - b: int8x16_t, - c: uint8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint32x2_t = transmute(c); - let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vusdotq_s32(a, transmute(c), b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { + vst3q_s16(transmute(a), transmute(b)) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] - fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")] + fn _vst4_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + size: i32, + ); } - _vtbl1(a, b) + _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] - fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")] + fn _vst4q_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + size: i32, + ); } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbl1(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vtbl1(a, b) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vtbl1(a, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl1(transmute(a), transmute(b))) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbl1(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl1(transmute(a), transmute(b))) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbl1(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] - fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")] + fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); } - _vtbl2(a, b, c) + _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] - fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")] + fn _vst4q_s8( + ptr: *mut i8, + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + size: i32, + ); } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbl2(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - vtbl2(a.0, a.1, b) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - let mut a: int8x8x2_t = a; - a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); - a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vtbl2(a.0, a.1, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x2_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x2_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] - fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")] + fn _vst4_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + size: i32, + ); } - _vtbl3(a, b, c, d) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] + _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] - fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")] + fn _vst4q_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + size: i32, + ); } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbl3(a, b, c, d); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - vtbl3(a.0, a.1, a.2, b) +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")] + fn _vst4_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + size: i32, + ); + } + _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - let mut a: int8x8x3_t = a; - a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); - a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); - a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vtbl3(a.0, a.1, a.2, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")] + fn _vst4q_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + size: i32, + ); + } + _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2f32.p0i8" + )] + fn _vst4_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut i8); + } + _vst4_f32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x3_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4f32.p0i8" + )] + fn _vst4q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut i8); + } + _vst4q_f32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v8i8.p0i8" + )] + fn _vst4_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); + } + _vst4_s8(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x3_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v16i8.p0i8" + )] + fn _vst4q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); + } + _vst4q_s8(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] - fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4i16.p0i8" + )] + fn _vst4_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i8); } - _vtbl4(a, b, c, d, e) + _vst4_s16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] - fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v8i16.p0i8" + )] + fn _vst4q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i8); } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); - let e: int8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbl4(a, b, c, d, e); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vst4q_s16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { - vtbl4(a.0, a.1, a.2, a.3, b) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v2i32.p0i8" + )] + fn _vst4_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i8); + } + _vst4_s32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { - let mut a: int8x8x4_t = a; - a.0 = simd_shuffle!(a.0, a.0, [0, 1, 2, 3, 4, 5, 6, 7]); - a.1 = simd_shuffle!(a.1, a.1, [0, 1, 2, 3, 4, 5, 6, 7]); - a.2 = simd_shuffle!(a.2, a.2, [0, 1, 2, 3, 4, 5, 6, 7]); - a.3 = simd_shuffle!(a.3, a.3, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vtbl4(a.0, a.1, a.2, a.3, b); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v4i32.p0i8" + )] + fn _vst4q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i8); + } + _vst4q_s32(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )) +pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] + fn _vst4_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ); + } + _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x4_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] + fn _vst4q_lane_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ); + } + _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )) +pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] + fn _vst4_lane_s8( + ptr: *mut i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ); + } + _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x4_t = a; - a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); - a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); - a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); - a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { +pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] - fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] + fn _vst4_lane_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ); } - _vtbx1(a, b, c) + _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { +pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] - fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] + fn _vst4q_lane_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ); } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbx1(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - vtbx1(a, b, c) +pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] + fn _vst4_lane_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ); + } + _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vtbx1(a, b, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] + fn _vst4q_lane_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ); + } + _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx1(transmute(a), transmute(b), transmute(c))) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8" + )] + fn _vst4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8" + )] + fn _vst4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx1(transmute(a), transmute(b), transmute(c))) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8" + )] + fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8); + } + _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8" + )] + fn _vst4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] - fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8" + )] + fn _vst4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *mut i8, + ); } - _vtbx2(a, b, c, d) + _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] - fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8" + )] + fn _vst4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *mut i8, + ); } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbx2(a, b, c, d); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) + _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { - vtbx2(a, b.0, b.1, c) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8" + )] + fn _vst4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *mut i8, + ); + } + _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { - let mut b: int8x8x2_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vtbx2(a, b.0, b.1, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + vst4_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x2_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + vst4_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x2_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + vst4q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] - fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; - } - _vtbx3(a, b, c, d, e) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] - fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); - let e: int8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbx3(a, b, c, d, e); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); + vst4_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { - vtbx3(a, b.0, b.1, b.2, c) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { - let mut b: int8x8x3_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - b.0 = simd_shuffle!(b.0, b.0, [0, 1, 2, 3, 4, 5, 6, 7]); - b.1 = simd_shuffle!(b.1, b.1, [0, 1, 2, 3, 4, 5, 6, 7]); - b.2 = simd_shuffle!(b.2, b.2, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = vtbx3(a, b.0, b.1, b.2, c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { + vst4_s64(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )) -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x3_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v1i64")] + fn _vst4_s64( + ptr: *mut i8, + a: int64x1_t, + b: int64x1_t, + c: int64x1_t, + d: int64x1_t, + size: i32, + ); + } + _vst4_s64(a as _, b.0, b.1, b.2, b.3, 8) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st4.v1i64.p0i8" + )] + fn _vst4_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i8); + } + _vst4_s64(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x3_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { + vst4_s64(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx4( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - e: int8x8_t, - f: int8x8_t, -) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] - fn _vtbx4( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - e: int8x8_t, - f: int8x8_t, - ) -> int8x8_t; - } - _vtbx4(a, b, c, d, e, f) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { + vst4_s8(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -unsafe fn vtbx4( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - e: int8x8_t, - f: int8x8_t, -) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] - fn _vtbx4( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - e: int8x8_t, - f: int8x8_t, - ) -> int8x8_t; - } - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let d: int8x8_t = simd_shuffle!(d, d, [0, 1, 2, 3, 4, 5, 6, 7]); - let e: int8x8_t = simd_shuffle!(e, e, [0, 1, 2, 3, 4, 5, 6, 7]); - let f: int8x8_t = simd_shuffle!(f, f, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int8x8_t = _vtbx4(a, b, c, d, e, f); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { + vst4q_s8(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - vtbx4( - a, - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - ) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { + vst4_s16(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - let mut b: int8x8x4_t = b; - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x8_t = vtbx4( - a, - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - ); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { + vst4q_s16(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { - transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { + vst4_s32(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x4_t = b; - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { + vst4q_s32(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { + vst4_s8(transmute(a), transmute(b)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x4_t = b; - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { + vst4q_s8(transmute(a), transmute(b)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -111515,23 +61383,20 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) +pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { + vst4_s16(transmute(a), transmute(b)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(st4) )] #[cfg_attr( not(target_arch = "arm"), @@ -111541,28 +61406,20 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: float32x2x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { + vst4q_s16(transmute(a), transmute(b)) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(fsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111572,23 +61429,20 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) +pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(fsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111598,28 +61452,20 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: int32x2x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111629,23 +61475,20 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) +pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111655,28 +61498,20 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: uint32x2x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111686,23 +61521,20 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111712,28 +61544,20 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - let mut ret_val: float32x4x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111743,23 +61567,20 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111769,28 +61590,20 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - let mut ret_val: int8x8x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111800,31 +61613,20 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a1: int8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: int8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) +pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111834,44 +61636,20 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a1: int8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: int8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - let mut ret_val: int8x16x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val +pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111881,23 +61659,20 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111907,28 +61682,20 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - let mut ret_val: int16x4x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111938,23 +61705,20 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111964,28 +61728,20 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - let mut ret_val: int16x8x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -111995,23 +61751,20 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -112021,28 +61774,20 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - let mut ret_val: int32x4x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -112052,23 +61797,20 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(sub) )] #[cfg_attr( not(target_arch = "arm"), @@ -112078,28 +61820,20 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - let mut ret_val: uint8x8x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + simd_sub(a, b) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -112109,31 +61843,21 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a1: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: uint8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) +pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let d: int8x8_t = vsubhn_s16(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -112143,44 +61867,21 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a1: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: uint8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - let mut ret_val: uint8x16x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val +pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let d: int16x4_t = vsubhn_s32(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -112190,23 +61891,21 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let d: int32x2_t = vsubhn_s64(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -112216,28 +61915,21 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - let mut ret_val: uint16x4x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let d: uint8x8_t = vsubhn_u16(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -112247,23 +61939,21 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let d: uint16x4_t = vsubhn_u32(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -112273,28 +61963,21 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - let mut ret_val: uint16x8x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let d: uint32x2_t = vsubhn_u64(b, c); + simd_shuffle!(a, d, [0, 1, 2, 3]) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -112304,23 +61987,21 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -112330,28 +62011,21 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - let mut ret_val: uint32x4x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + let c: i32x4 = i32x4::new(16, 16, 16, 16); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -112361,23 +62035,21 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + let c: i64x2 = i64x2::new(32, 32); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -112387,62 +62059,45 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - let mut ret_val: poly8x8x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(subhn) )] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a1: poly8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: poly8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let c: u32x4 = u32x4::new(16, 16, 16, 16); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(subhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -112452,44 +62107,21 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a1: poly8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: poly8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - let mut ret_val: poly8x16x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val +pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let c: u64x2 = u64x2::new(32, 32); + simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(ssubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -112499,23 +62131,22 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) +pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + let c: int16x8_t = simd_cast(a); + let d: int16x8_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(ssubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -112525,28 +62156,22 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - let mut ret_val: poly16x4x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + let c: int32x4_t = simd_cast(a); + let d: int32x4_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(ssubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -112556,23 +62181,22 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + let c: int64x2_t = simd_cast(a); + let d: int64x2_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(usubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -112582,28 +62206,22 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - let mut ret_val: poly16x8x2_t = transmute((a1, b1)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + let c: uint16x8_t = simd_cast(a); + let d: uint16x8_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(usubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -112613,23 +62231,22 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let c: int8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + let c: uint32x4_t = simd_cast(a); + let d: uint32x4_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(usubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -112639,26 +62256,22 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + let c: uint64x2_t = simd_cast(a); + let d: uint64x2_t = simd_cast(b); + simd_sub(c, d) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(ssubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -112668,23 +62281,20 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let c: int8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(ssubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -112694,30 +62304,20 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_ne(c, transmute(d)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(ssubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -112727,23 +62327,20 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let c: int16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(usubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -112753,26 +62350,20 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(usubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -112782,23 +62373,20 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let c: int16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(usubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -112808,1034 +62396,1104 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { + simd_sub(a, simd_cast(b)) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(sudot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let c: int32x2_t = simd_and(a, b); - let d: i32x2 = i32x2::new(0, 0); - simd_ne(c, transmute(d)) +pub unsafe fn vsudot_lane_s32( + a: int32x2_t, + b: int8x8_t, + c: uint8x8_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, transmute(c), b) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(sudot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: int32x2_t = simd_and(a, b); - let d: i32x2 = i32x2::new(0, 0); - let ret_val: uint32x2_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +pub unsafe fn vsudotq_lane_s32( + a: int32x4_t, + b: int8x16_t, + c: uint8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: uint32x2_t = transmute(c); + let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, transmute(c), b) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] + fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + _vtbl1(a, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + vtbl1(a, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl1(transmute(a), transmute(b))) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl1(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl1(transmute(a), transmute(b))) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl1(transmute(a), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] + fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + _vtbl2(a, b, c) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vtbl2(a.0, a.1, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x2_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] + fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + } + _vtbl3(a, b, c, d) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + vtbl3(a.0, a.1, a.2, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let c: int32x4_t = simd_and(a, b); - let d: i32x4 = i32x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: int32x4_t = simd_and(a, b); - let d: i32x4 = i32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { - let c: poly8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: poly8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x3_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { - let c: poly8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +unsafe fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] + fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + } + _vtbl4(a, b, c, d, e) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: poly8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_ne(c, transmute(d)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + vtbl4(a.0, a.1, a.2, a.3, b) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { - let c: poly16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: poly16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { - let c: poly16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )) } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: poly16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x8x4_t = a; + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] + fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + _vtbx1(a, b, c) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + vtbx1(a, b, c) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let c: uint8x8_t = simd_and(a, b); - let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint8x8_t = simd_and(a, b); - let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x8_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let c: uint8x16_t = simd_and(a, b); - let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: uint8x16_t = simd_and(a, b); - let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint8x16_t = simd_ne(c, transmute(d)); - simd_shuffle!( - ret_val, - ret_val, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] + fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; + } + _vtbx2(a, b, c, d) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + vtbx2(a, b.0, b.1, c) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let c: uint16x4_t = simd_and(a, b); - let d: u16x4 = u16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint16x4_t = simd_and(a, b); - let d: u16x4 = u16x4::new(0, 0, 0, 0); - let ret_val: uint16x4_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x2_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let c: uint16x8_t = simd_and(a, b); - let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: uint16x8_t = simd_and(a, b); - let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - let ret_val: uint16x8_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3, 4, 5, 6, 7]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x2_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let c: uint32x2_t = simd_and(a, b); - let d: u32x2 = u32x2::new(0, 0); - simd_ne(c, transmute(d)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] + fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + } + _vtbx3(a, b, c, d, e) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let c: uint32x2_t = simd_and(a, b); - let d: u32x2 = u32x2::new(0, 0); - let ret_val: uint32x2_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + vtbx3(a, b.0, b.1, b.2, c) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let c: uint32x4_t = simd_and(a, b); - let d: u32x4 = u32x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )) } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let c: uint32x4_t = simd_and(a, b); - let d: u32x4 = u32x4::new(0, 0, 0, 0); - let ret_val: uint32x4_t = simd_ne(c, transmute(d)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x3_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vusdot_lane_s32( - a: int32x2_t, - b: uint8x8_t, - c: int8x8_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - vusdot_s32(a, b, transmute(c)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vusdot_lane_s32( - a: int32x2_t, - b: uint8x8_t, +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x3_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +unsafe fn vtbx4( + a: int8x8_t, + b: int8x8_t, c: int8x8_t, -) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int32x2_t = transmute(c); - let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); - let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1]) + d: int8x8_t, + e: int8x8_t, + f: int8x8_t, +) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] + fn _vtbx4( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + e: int8x8_t, + f: int8x8_t, + ) -> int8x8_t; + } + _vtbx4(a, b, c, d, e, f) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vusdotq_lane_s32( - a: int32x4_t, - b: uint8x16_t, - c: int8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c: int32x2_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - vusdotq_s32(a, b, transmute(c)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + vtbx4( + a, + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + ) } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) -)] -#[rustc_legacy_const_generics(3)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vusdotq_lane_s32( - a: int32x4_t, - b: uint8x16_t, - c: int8x8_t, -) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int32x2_t = transmute(c); - let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); - let ret_val: int32x4_t = vusdotq_s32(a, b, transmute(c)); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x4_t = b; + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vtbx4( + a, + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )) } -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon instrinsic unsafe"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x8x4_t = b; + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot) -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] - fn _vusdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - _vusdot_s32(a, b.as_signed(), c) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )) } -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot) -)] -#[cfg_attr( - not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] - fn _vusdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; - } - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let c: int8x8_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7]); - let ret_val: int32x2_t = _vusdot_s32(a, b.as_signed(), c); - simd_shuffle!(ret_val, ret_val, [0, 1]) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x8x4_t = b; + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] - fn _vusdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vusdotq_s32(a, b.as_signed(), c) +pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a1, b1)) } -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] - fn _vusdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int32x4_t = _vusdotq_s32(a, b.as_signed(), c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a1, b1)) } -#[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usmmla) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] - fn _vusmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - _vusmmlaq_s32(a, b.as_signed(), c) +pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a1, b1)) } -#[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usmmla) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] - fn _vusmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let c: int8x16_t = simd_shuffle!(c, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let ret_val: int32x4_t = _vusmmlaq_s32(a, b.as_signed(), c); - simd_shuffle!(ret_val, ret_val, [0, 1, 2, 3]) +pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -113845,23 +63503,22 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) +pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -113871,28 +63528,30 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: float32x2x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a1: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: int8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -113902,23 +63561,22 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) +pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -113928,28 +63586,22 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: int32x2x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -113959,23 +63611,22 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) +pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -113985,28 +63636,22 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: uint32x2x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114016,23 +63661,30 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a1: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: uint8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114042,28 +63694,22 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - let mut ret_val: float32x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114073,23 +63719,22 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114099,28 +63744,22 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - let mut ret_val: int8x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114130,31 +63769,22 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a0: int8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: int8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - transmute((a0, b0)) +pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114164,44 +63794,30 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a0: int8x16_t = simd_shuffle!( +pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a1: poly8x16_t = simd_shuffle!( a, b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] ); - let b0: int8x16_t = simd_shuffle!( + let b1: poly8x16_t = simd_shuffle!( a, b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - let mut ret_val: int8x16x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] ); - ret_val + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114211,23 +63827,22 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(trn) )] #[cfg_attr( not(target_arch = "arm"), @@ -114237,28 +63852,22 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - let mut ret_val: int16x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114268,23 +63877,22 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + let c: int8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114294,28 +63902,22 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - let mut ret_val: int16x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + let c: int8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114325,23 +63927,22 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { + let c: int16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114351,28 +63952,22 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - let mut ret_val: int32x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { + let c: int16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114382,23 +63977,22 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { + let c: int32x2_t = simd_and(a, b); + let d: i32x2 = i32x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114408,28 +64002,22 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - let mut ret_val: uint8x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { + let c: int32x4_t = simd_and(a, b); + let d: i32x4 = i32x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114439,31 +64027,22 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a0: uint8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: uint8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - transmute((a0, b0)) +pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + let c: poly8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114473,44 +64052,22 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a0: uint8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: uint8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - let mut ret_val: uint8x16x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val +pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + let c: poly8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114520,23 +64077,22 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { + let c: poly16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114546,28 +64102,22 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - let mut ret_val: uint16x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { + let c: poly16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114577,23 +64127,22 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let c: uint8x8_t = simd_and(a, b); + let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114603,28 +64152,22 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - let mut ret_val: uint16x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let c: uint8x16_t = simd_and(a, b); + let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114634,23 +64177,22 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let c: uint16x4_t = simd_and(a, b); + let d: u16x4 = u16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114660,28 +64202,22 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - let mut ret_val: uint32x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let c: uint16x8_t = simd_and(a, b); + let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114691,23 +64227,22 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let c: uint32x2_t = simd_and(a, b); + let d: u32x2 = u32x2::new(0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -114717,192 +64252,177 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - let mut ret_val: poly8x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let c: uint32x4_t = simd_and(a, b); + let d: u32x4 = u32x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(usdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a0: poly8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: poly8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - transmute((a0, b0)) +pub unsafe fn vusdot_lane_s32( + a: int32x2_t, + b: uint8x8_t, + c: int8x8_t, +) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); + vusdot_s32(a, b, transmute(c)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(usdot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a0: poly8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: poly8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - let mut ret_val: poly8x16x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val +pub unsafe fn vusdotq_lane_s32( + a: int32x4_t, + b: uint8x16_t, + c: int8x8_t, +) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c: int32x2_t = transmute(c); + let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); + vusdotq_s32(a, b, transmute(c)) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(usdot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) +pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] + fn _vusdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + } + _vusdot_s32(a, b.as_signed(), c) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(usdot) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - let mut ret_val: poly16x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] + fn _vusdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vusdotq_s32(a, b.as_signed(), c) } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] +#[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(usmmla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) +pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] + fn _vusmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + _vusmmlaq_s32(a, b.as_signed(), c) } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(zip) )] #[cfg_attr( not(target_arch = "arm"), @@ -114912,22 +64432,16 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - let mut ret_val: poly16x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -114943,17 +64457,16 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); +pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -114969,28 +64482,22 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { - let a: float32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: float32x2_t = simd_shuffle!(b, b, [0, 1]); - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: float32x2x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115000,23 +64507,22 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); +pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115026,28 +64532,22 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { - let a: int32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: int32x2_t = simd_shuffle!(b, b, [0, 1]); - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: int32x2x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115057,23 +64557,30 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); +pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115083,28 +64590,22 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { - let a: uint32x2_t = simd_shuffle!(a, a, [0, 1]); - let b: uint32x2_t = simd_shuffle!(b, b, [0, 1]); - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - let mut ret_val: uint32x2x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1]); - ret_val +pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115114,23 +64615,22 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115140,28 +64640,22 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { - let a: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - let mut ret_val: int8x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115171,23 +64665,22 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115197,28 +64690,30 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { - let a: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - let mut ret_val: int16x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115228,23 +64723,22 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115254,28 +64748,22 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { - let a: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - let mut ret_val: uint8x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115285,23 +64773,22 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115311,28 +64798,22 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { - let a: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - let mut ret_val: uint16x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115342,23 +64823,30 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + let a0: poly8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: poly8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115368,28 +64856,22 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { - let a: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly8x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - let mut ret_val: poly8x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(uzp) )] #[cfg_attr( not(target_arch = "arm"), @@ -115399,20 +64881,19 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115425,25 +64906,19 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { - let a: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: poly16x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - let mut ret_val: poly16x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115456,20 +64931,19 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115482,25 +64956,19 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { - let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - let mut ret_val: float32x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115513,28 +64981,19 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a0: int8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - let b0: int8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); +pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115547,41 +65006,19 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { - let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a0: int8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - let b0: int8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); - let mut ret_val: int8x16x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val +pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115594,20 +65031,19 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115620,25 +65056,19 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { - let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - let mut ret_val: int16x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115651,20 +65081,19 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip) @@ -115677,22 +65106,16 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { - let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - let mut ret_val: int32x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115708,25 +65131,16 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a0: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - let b0: uint8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); +pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115742,38 +65156,24 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { - let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let a0: uint8x16_t = simd_shuffle!( +pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + let a0: int8x16_t = simd_shuffle!( a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ); - let b0: uint8x16_t = simd_shuffle!( + let b0: int8x16_t = simd_shuffle!( a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ); - let mut ret_val: uint8x16x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115789,17 +65189,16 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); +pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115815,22 +65214,16 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { - let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - let mut ret_val: uint16x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val +pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115846,17 +65239,24 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); +pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115872,22 +65272,16 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { - let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); - let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - let mut ret_val: uint32x4x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3]); - ret_val +pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + transmute((a0, b0)) } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115903,17 +65297,9 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a0: poly8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - let b0: poly8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); +pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } #[doc = "Zip vectors"] @@ -115921,7 +65307,6 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -115938,8 +65323,6 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { - let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); let a0: poly8x16_t = simd_shuffle!( a, b, @@ -115950,43 +65333,6 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ); - let mut ret_val: poly8x16x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!( - ret_val.0, - ret_val.0, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val.1 = simd_shuffle!( - ret_val.1, - ret_val.1, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - ); - ret_val -} -#[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon instrinsic unsafe"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a0: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } #[doc = "Zip vectors"] @@ -115994,7 +65340,6 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[doc = "## Safety"] #[doc = " * Neon instrinsic unsafe"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -116011,12 +65356,7 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { - let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); - let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); let a0: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - let mut ret_val: poly16x8x2_t = transmute((a0, b0)); - ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [0, 1, 2, 3, 4, 5, 6, 7]); - ret_val + transmute((a0, b0)) } diff --git a/crates/stdarch-gen-arm/src/big_endian.rs b/crates/stdarch-gen-arm/src/big_endian.rs index 5bf1a720ea..da438586e4 100644 --- a/crates/stdarch-gen-arm/src/big_endian.rs +++ b/crates/stdarch-gen-arm/src/big_endian.rs @@ -30,27 +30,15 @@ pub fn create_symbol_identifier(arbitrary_string: &str) -> Expression { /// ``` /// [0, 1, 2, 3] /// ``` -fn create_array(lanes: u32, reverse: bool) -> Option { - if reverse { - match lanes { - 1 => None, /* Makes no sense to shuffle an array of size 1 */ - 2 => Some("[1, 0]".to_string()), - 3 => Some("[2, 1, 0]".to_string()), - 4 => Some("[3, 2, 1, 0]".to_string()), - 8 => Some("[7, 6, 5, 4, 3, 2, 1, 0]".to_string()), - 16 => Some("[15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]".to_string()), - _ => panic!("Incorrect vector number of vector lanes: {}", lanes), - } - } else { - match lanes { - 1 => None, /* Makes no sense to shuffle an array of size 1 */ - 2 => Some("[0, 1]".to_string()), - 3 => Some("[0, 1, 2]".to_string()), - 4 => Some("[0, 1, 2, 3]".to_string()), - 8 => Some("[0, 1, 2, 3, 4, 5, 6, 7]".to_string()), - 16 => Some("[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]".to_string()), - _ => panic!("Incorrect vector number of vector lanes: {}", lanes), - } +fn create_array(lanes: u32) -> Option { + match lanes { + 1 => None, /* Makes no sense to shuffle an array of size 1 */ + 2 => Some("[1, 0]".to_string()), + 3 => Some("[2, 1, 0]".to_string()), + 4 => Some("[3, 2, 1, 0]".to_string()), + 8 => Some("[7, 6, 5, 4, 3, 2, 1, 0]".to_string()), + 16 => Some("[15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]".to_string()), + _ => panic!("Incorrect vector number of vector lanes: {}", lanes), } } @@ -118,7 +106,6 @@ pub fn make_variable_mutable(variable_name: &str, type_kind: &TypeKind) -> Expre fn create_shuffle_internal( variable_name: &String, type_kind: &TypeKind, - reverse: bool, fmt_tuple: fn(variable_name: &String, idx: u32, array_lanes: &String) -> String, fmt: fn(variable_name: &String, type_kind: &TypeKind, array_lanes: &String) -> String, ) -> Option { @@ -127,7 +114,7 @@ fn create_shuffle_internal( }; let lane_count = vector_type.lanes(); - let Some(array_lanes) = create_array(lane_count, reverse) else { + let Some(array_lanes) = create_array(lane_count) else { return None; }; @@ -194,27 +181,20 @@ fn create_shuffle_call_fmt( pub fn create_assigned_shuffle_call( variable_name: &String, type_kind: &TypeKind, - reverse: bool, ) -> Option { create_shuffle_internal( variable_name, type_kind, - reverse, create_assigned_tuple_shuffle_call_fmt, create_assigned_shuffle_call_fmt, ) } /// Create a `simd_shuffle!(<...>, [...])` call -pub fn create_shuffle_call( - variable_name: &String, - type_kind: &TypeKind, - reverse: bool, -) -> Option { +pub fn create_shuffle_call(variable_name: &String, type_kind: &TypeKind) -> Option { create_shuffle_internal( variable_name, type_kind, - reverse, create_assigned_tuple_shuffle_call_fmt, create_shuffle_call_fmt, ) diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index 4d13c27685..ad833f52ad 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -915,8 +915,8 @@ pub struct Intrinsic { /// Big endian variant for composing, this gets populated internally #[serde(skip)] pub big_endian_compose: Vec, - /// Big endian sometimes needs the bits inverted from the default reverse - /// to work correctly + /// Big endian sometimes needs the bits inverted in a way that cannot be + /// automatically detected #[serde(default)] pub big_endian_inverse: Option, } @@ -1043,9 +1043,9 @@ impl Intrinsic { /// Add a big endian implementation fn generate_big_endian(&self, variant: &mut Intrinsic) { - /* We can't always blindly reverse the bits we sometimes need a - * different order - thus this allows us to have the ability to do so - * without having to play codegolf witht the yaml AST */ + /* We can't always blindly reverse the bits only in certain conditions + * do we need a different order - thus this allows us to have the + * ability to do so without having to play codegolf with the yaml AST */ let should_reverse = { if let Some(should_reverse) = variant.big_endian_inverse { should_reverse @@ -1059,6 +1059,10 @@ impl Intrinsic { } }; + if !should_reverse { + return; + } + let mut big_endian_expressions: Vec = Vec::new(); /* We cannot assign `a.0 = ` directly to a function parameter so @@ -1087,7 +1091,6 @@ impl Intrinsic { if let Some(shuffle_call) = create_assigned_shuffle_call( &function_parameter.name.to_string(), &function_parameter.kind, - should_reverse, ) { big_endian_expressions.push(shuffle_call); } @@ -1144,9 +1147,7 @@ impl Intrinsic { * as in code we are making the final call before caputuring the return * value of the intrinsic that has been called.*/ let ret_val_name = "ret_val".to_string(); - if let Some(simd_shuffle_call) = - create_shuffle_call(&ret_val_name, return_type, should_reverse) - { + if let Some(simd_shuffle_call) = create_shuffle_call(&ret_val_name, return_type) { /* There is a possibility that the funcion arguments did not * require big endian treatment, thus we need to now add the * original function body before appending the return value.*/